# django/db/models/lookups.py
import itertools
import math
from copy import copy

from django.core.exceptions import EmptyResultSet
from django.db.models.expressions import Exists, Func, RawSQL, Value
from django.db.models.fields import DateTimeField, Field, IntegerField
from django.db.models.query_utils import RegisterLookupMixin
from django.utils.datastructures import OrderedSet
from django.utils.functional import cached_property


class Lookup:
    lookup_name = None
    prepare_rhs = True
    can_use_none_as_rhs = False

    def __init__(self, lhs, rhs):
        self.lhs, self.rhs = lhs, rhs
        self.rhs = self.get_prep_lookup()
        if hasattr(self.lhs, 'get_bilateral_transforms'):
            bilateral_transforms = self.lhs.get_bilateral_transforms()
        else:
            bilateral_transforms = []
        if bilateral_transforms:
            # Warn the user as soon as possible if they are trying to apply
            # a bilateral transformation on a nested QuerySet: that won't work.
            from django.db.models.sql.query import Query  # avoid circular import
            if isinstance(rhs, Query):
                raise NotImplementedError("Bilateral transformations on nested querysets are not implemented.")
        self.bilateral_transforms = bilateral_transforms

    def apply_bilateral_transforms(self, value):
        for transform in self.bilateral_transforms:
            value = transform(value)
        return value

    def batch_process_rhs(self, compiler, connection, rhs=None):
        if rhs is None:
            rhs = self.rhs
        if self.bilateral_transforms:
            sqls, sqls_params = [], []
            for p in rhs:
                value = Value(p, output_field=self.lhs.output_field)
                value = self.apply_bilateral_transforms(value)
                value = value.resolve_expression(compiler.query)
                sql, sql_params = compiler.compile(value)
                sqls.append(sql)
                sqls_params.extend(sql_params)
        else:
            _, params = self.get_db_prep_lookup(rhs, connection)
            sqls, sqls_params = ['%s'] * len(params), params
        return sqls, sqls_params

    def get_source_expressions(self):
        if self.rhs_is_direct_value():
            return [self.lhs]
        return [self.lhs, self.rhs]

    def set_source_expressions(self, new_exprs):
        if len(new_exprs) == 1:
            self.lhs = new_exprs[0]
        else:
            self.lhs, self.rhs = new_exprs

    def get_prep_lookup(self):
        if hasattr(self.rhs, 'resolve_expression'):
            return self.rhs
        if self.prepare_rhs and hasattr(self.lhs.output_field, 'get_prep_value'):
            return self.lhs.output_field.get_prep_value(self.rhs)
        return self.rhs

    def get_db_prep_lookup(self, value, connection):
        return ('%s', [value])

    def process_lhs(self, compiler, connection, lhs=None):
        lhs = lhs or self.lhs
        if hasattr(lhs, 'resolve_expression'):
            lhs = lhs.resolve_expression(compiler.query)
        return compiler.compile(lhs)

    def process_rhs(self, compiler, connection):
        value = self.rhs
        if self.bilateral_transforms:
            if self.rhs_is_direct_value():
                # Do not call get_db_prep_lookup here as the value will be
                # transformed before being used for lookup
                value = Value(value, output_field=self.lhs.output_field)
            value = self.apply_bilateral_transforms(value)
            value = value.resolve_expression(compiler.query)
        if hasattr(value, 'as_sql'):
            return compiler.compile(value)
        else:
            return self.get_db_prep_lookup(value, connection)

    def rhs_is_direct_value(self):
        return not hasattr(self.rhs, 'as_sql')

    def relabeled_clone(self, relabels):
        new = copy(self)
        new.lhs = new.lhs.relabeled_clone(relabels)
        if hasattr(new.rhs, 'relabeled_clone'):
            new.rhs = new.rhs.relabeled_clone(relabels)
        return new

    def get_group_by_cols(self, alias=None):
        cols = self.lhs.get_group_by_cols()
        if hasattr(self.rhs, 'get_group_by_cols'):
            cols.extend(self.rhs.get_group_by_cols())
        return cols

    def as_sql(self, compiler, connection):
        raise NotImplementedError

    def as_oracle(self, compiler, connection):
        # Oracle doesn't allow EXISTS() to be compared to another expression
        # unless it's wrapped in a CASE WHEN.
        wrapped = False
        exprs = []
        for expr in (self.lhs, self.rhs):
            if isinstance(expr, Exists):
                # XXX: Use Case(When(self.lhs)) once support for boolean
                # expressions is added to When.
                sql, params = compiler.compile(expr)
                sql = 'CASE WHEN %s THEN 1 ELSE 0 END' % sql
                expr = RawSQL(sql, params)
                wrapped = True
            exprs.append(expr)
        lookup = type(self)(*exprs) if wrapped else self
        return lookup.as_sql(compiler, connection)

    @cached_property
    def contains_aggregate(self):
        return self.lhs.contains_aggregate or getattr(self.rhs, 'contains_aggregate', False)

    @cached_property
    def contains_over_clause(self):
        return self.lhs.contains_over_clause or getattr(self.rhs, 'contains_over_clause', False)

    @property
    def is_summary(self):
        return self.lhs.is_summary or getattr(self.rhs, 'is_summary', False)


class Transform(RegisterLookupMixin, Func):
    """
    RegisterLookupMixin() is first so that get_lookup() and get_transform()
    first examine self and then check output_field.
    """
    bilateral = False
    arity = 1

    @property
    def lhs(self):
        return self.get_source_expressions()[0]

    def get_bilateral_transforms(self):
        if hasattr(self.lhs, 'get_bilateral_transforms'):
            bilateral_transforms = self.lhs.get_bilateral_transforms()
        else:
            bilateral_transforms = []
        if self.bilateral:
            bilateral_transforms.append(self.__class__)
        return bilateral_transforms


class BuiltinLookup(Lookup):
    def process_lhs(self, compiler, connection, lhs=None):
        lhs_sql, params = super().process_lhs(compiler, connection, lhs)
        field_internal_type = self.lhs.output_field.get_internal_type()
        db_type = self.lhs.output_field.db_type(connection=connection)
        lhs_sql = connection.ops.field_cast_sql(
            db_type, field_internal_type) % lhs_sql
        lhs_sql = connection.ops.lookup_cast(self.lookup_name, field_internal_type) % lhs_sql
        return lhs_sql, list(params)

    def as_sql(self, compiler, connection):
        lhs_sql, params = self.process_lhs(compiler, connection)
        rhs_sql, rhs_params = self.process_rhs(compiler, connection)
        params.extend(rhs_params)
        rhs_sql = self.get_rhs_op(connection, rhs_sql)
        return '%s %s' % (lhs_sql, rhs_sql), params

    def get_rhs_op(self, connection, rhs):
        return connection.operators[self.lookup_name] % rhs


class FieldGetDbPrepValueMixin:
    """
    Some lookups require Field.get_db_prep_value() to be called on their
    inputs.
    """
    get_db_prep_lookup_value_is_iterable = False

    def get_db_prep_lookup(self, value, connection):
        # For relational fields, use the 'target_field' attribute of the
        # output_field.
        field = getattr(self.lhs.output_field, 'target_field', None)
        get_db_prep_value = getattr(field, 'get_db_prep_value', None) or self.lhs.output_field.get_db_prep_value
        return (
            '%s',
            [get_db_prep_value(v, connection, prepared=True) for v in value]
            if self.get_db_prep_lookup_value_is_iterable else
            [get_db_prep_value(value, connection, prepared=True)]
        )


class FieldGetDbPrepValueIterableMixin(FieldGetDbPrepValueMixin):
    """
    Some lookups require Field.get_db_prep_value() to be called on each value
    in an iterable.
    """
    get_db_prep_lookup_value_is_iterable = True

    def get_prep_lookup(self):
        if hasattr(self.rhs, 'resolve_expression'):
            return self.rhs
        prepared_values = []
        for rhs_value in self.rhs:
            if hasattr(rhs_value, 'resolve_expression'):
                # An expression will be handled by the database but can coexist
                # alongside real values.
                pass
            elif self.prepare_rhs and hasattr(self.lhs.output_field, 'get_prep_value'):
                rhs_value = self.lhs.output_field.get_prep_value(rhs_value)
            prepared_values.append(rhs_value)
        return prepared_values

    def process_rhs(self, compiler, connection):
        if self.rhs_is_direct_value():
            # rhs should be an iterable of values. Use batch_process_rhs()
            # to prepare/transform those values.
            return self.batch_process_rhs(compiler, connection)
        else:
            return super().process_rhs(compiler, connection)

    def resolve_expression_parameter(self, compiler, connection, sql, param):
        params = [param]
        if hasattr(param, 'resolve_expression'):
            param = param.resolve_expression(compiler.query)
        if hasattr(param, 'as_sql'):
            sql, params = param.as_sql(compiler, connection)
        return sql, params

    def batch_process_rhs(self, compiler, connection, rhs=None):
        pre_processed = super().batch_process_rhs(compiler, connection, rhs)
        # The params list may contain expressions which compile to a
        # sql/param pair. Zip them to get sql and param pairs that refer to the
        # same argument and attempt to replace them with the result of
        # compiling the param step.
        sql, params = zip(*(
            self.resolve_expression_parameter(compiler, connection, sql, param)
            for sql, param in zip(*pre_processed)
        ))
        params = itertools.chain.from_iterable(params)
        return sql, tuple(params)


@Field.register_lookup
class Exact(FieldGetDbPrepValueMixin, BuiltinLookup):
    lookup_name = 'exact'

    def process_rhs(self, compiler, connection):
        from django.db.models.sql.query import Query
        if isinstance(self.rhs, Query):
            if self.rhs.has_limit_one():
                # The subquery must select only the pk.
                self.rhs.clear_select_clause()
                self.rhs.add_fields(['pk'])
            else:
                raise ValueError(
                    'The QuerySet value for an exact lookup must be limited to '
                    'one result using slicing.'
                )
        return super().process_rhs(compiler, connection)


@Field.register_lookup
class IExact(BuiltinLookup):
    lookup_name = 'iexact'
    prepare_rhs = False

    def process_rhs(self, qn, connection):
        rhs, params = super().process_rhs(qn, connection)
        if params:
            params[0] = connection.ops.prep_for_iexact_query(params[0])
        return rhs, params


@Field.register_lookup
class GreaterThan(FieldGetDbPrepValueMixin, BuiltinLookup):
    lookup_name = 'gt'


@Field.register_lookup
class GreaterThanOrEqual(FieldGetDbPrepValueMixin, BuiltinLookup):
    lookup_name = 'gte'


@Field.register_lookup
class LessThan(FieldGetDbPrepValueMixin, BuiltinLookup):
    lookup_name = 'lt'


@Field.register_lookup
class LessThanOrEqual(FieldGetDbPrepValueMixin, BuiltinLookup):
    lookup_name = 'lte'


class IntegerFieldFloatRounding:
    """
    Allow floats to work as query values for IntegerField. Without this, the
    decimal portion of the float would always be discarded.
    """
    def get_prep_lookup(self):
        if isinstance(self.rhs, float):
            self.rhs = math.ceil(self.rhs)
        return super().get_prep_lookup()


@IntegerField.register_lookup
class IntegerGreaterThanOrEqual(IntegerFieldFloatRounding, GreaterThanOrEqual):
    pass


@IntegerField.register_lookup
class IntegerLessThan(IntegerFieldFloatRounding, LessThan):
    pass


@Field.register_lookup
class In(FieldGetDbPrepValueIterableMixin, BuiltinLookup):
    lookup_name = 'in'

    def process_rhs(self, compiler, connection):
        db_rhs = getattr(self.rhs, '_db', None)
        if db_rhs is not None and db_rhs != connection.alias:
            raise ValueError(
                "Subqueries aren't allowed across different databases. Force "
                "the inner query to be evaluated using `list(inner_query)`."
            )

        if self.rhs_is_direct_value():
            try:
                rhs = OrderedSet(self.rhs)
            except TypeError:  # Unhashable items in self.rhs
                rhs = self.rhs
            if not rhs:
                raise EmptyResultSet
            # rhs should be an iterable; use batch_process_rhs() to
            # prepare/transform those values.
            sqls, sqls_params = self.batch_process_rhs(compiler, connection, rhs)
            placeholder = '(' + ', '.join(sqls) + ')'
            return (placeholder, sqls_params)
        else:
            if not getattr(self.rhs, 'has_select_fields', True):
                self.rhs.clear_select_clause()
                self.rhs.add_fields(['pk'])
            return super().process_rhs(compiler, connection)

    def get_rhs_op(self, connection, rhs):
        return 'IN %s' % rhs

    def as_sql(self, compiler, connection):
        max_in_list_size = connection.ops.max_in_list_size()
        if self.rhs_is_direct_value() and max_in_list_size and len(self.rhs) > max_in_list_size:
            return self.split_parameter_list_as_sql(compiler, connection)
        return super().as_sql(compiler, connection)

    def split_parameter_list_as_sql(self, compiler, connection):
        # This is a special case for databases which limit the number of
        # elements which can appear in an 'IN' clause.
        max_in_list_size = connection.ops.max_in_list_size()
        lhs, lhs_params = self.process_lhs(compiler, connection)
        rhs, rhs_params = self.batch_process_rhs(compiler, connection)
        in_clause_elements = ['(']
        params = []
        for offset in range(0, len(rhs_params), max_in_list_size):
            if offset > 0:
                in_clause_elements.append(' OR ')
            in_clause_elements.append('%s IN (' % lhs)
            params.extend(lhs_params)
            sqls = rhs[offset: offset + max_in_list_size]
            sqls_params = rhs_params[offset: offset + max_in_list_size]
            param_group = ', '.join(sqls)
            in_clause_elements.append(param_group)
            in_clause_elements.append(')')
            params.extend(sqls_params)
        in_clause_elements.append(')')
        return ''.join(in_clause_elements), params


class PatternLookup(BuiltinLookup):
    param_pattern = '%%%s%%'
    prepare_rhs = False

    def get_rhs_op(self, connection, rhs):
        # Assume we are in startswith. We need to produce SQL like:
        # col LIKE %s, ['thevalue%']
        # For python values we can (and should) do that directly in Python,
        # but if the value is for example reference to other column, then
        # we need to add the % pattern match to the lookup by something like
        # col LIKE othercol || '%%'
        # So, for Python values we don't need any special pattern, but for
        # SQL reference values or SQL transformations we need the correct
        # pattern added.
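        # For example (illustrative, exact SQL varies per backend):
        # name__startswith='abc' compiles to roughly `name LIKE %s` with
        # params ['abc%'], while name__startswith=F('other') compiles to
        # something like `name LIKE other || '%'`.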
        if hasattr(self.rhs, 'as_sql') or self.bilateral_transforms:
            pattern = connection.pattern_ops[self.lookup_name].format(connection.pattern_esc)
            return pattern.format(rhs)
        else:
            return super().get_rhs_op(connection, rhs)

    def process_rhs(self, qn, connection):
        rhs, params = super().process_rhs(qn, connection)
        if self.rhs_is_direct_value() and params and not self.bilateral_transforms:
            params[0] = self.param_pattern % connection.ops.prep_for_like_query(params[0])
        return rhs, params


@Field.register_lookup
class Contains(PatternLookup):
    lookup_name = 'contains'


@Field.register_lookup
class IContains(Contains):
    lookup_name = 'icontains'


@Field.register_lookup
class StartsWith(PatternLookup):
    lookup_name = 'startswith'
    param_pattern = '%s%%'


@Field.register_lookup
class IStartsWith(StartsWith):
    lookup_name = 'istartswith'


@Field.register_lookup
class EndsWith(PatternLookup):
    lookup_name = 'endswith'
    param_pattern = '%%%s'


@Field.register_lookup
class IEndsWith(EndsWith):
    lookup_name = 'iendswith'


@Field.register_lookup
class Range(FieldGetDbPrepValueIterableMixin, BuiltinLookup):
    lookup_name = 'range'

    def get_rhs_op(self, connection, rhs):
        return "BETWEEN %s AND %s" % (rhs[0], rhs[1])


@Field.register_lookup
class IsNull(BuiltinLookup):
    lookup_name = 'isnull'
    prepare_rhs = False

    def as_sql(self, compiler, connection):
        sql, params = compiler.compile(self.lhs)
        if self.rhs:
            return "%s IS NULL" % sql, params
        else:
            return "%s IS NOT NULL" % sql, params


@Field.register_lookup
class Regex(BuiltinLookup):
    lookup_name = 'regex'
    prepare_rhs = False

    def as_sql(self, compiler, connection):
        if self.lookup_name in connection.operators:
            return super().as_sql(compiler, connection)
        else:
            lhs, lhs_params = self.process_lhs(compiler, connection)
            rhs, rhs_params = self.process_rhs(compiler, connection)
            sql_template = connection.ops.regex_lookup(self.lookup_name)
            return sql_template % (lhs, rhs), lhs_params + rhs_params


@Field.register_lookup
class IRegex(Regex):
    lookup_name = 'iregex'


class YearLookup(Lookup):
    def year_lookup_bounds(self, connection, year):
        output_field = self.lhs.lhs.output_field
        if isinstance(output_field, DateTimeField):
            bounds = connection.ops.year_lookup_bounds_for_datetime_field(year)
        else:
            bounds = connection.ops.year_lookup_bounds_for_date_field(year)
        return bounds

    def as_sql(self, compiler, connection):
        # Avoid the extract operation if the rhs is a direct value to allow
        # indexes to be used.
        if self.rhs_is_direct_value():
            # Skip the extract part by directly using the originating field,
            # that is self.lhs.lhs.
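            # For example (illustrative): pub_date__year=2020 compiles to
            # roughly `pub_date BETWEEN %s AND %s` with the year's first and
            # last moments as params, instead of something like
            # EXTRACT('year' FROM pub_date) = 2020, so a plain index on the
            # column can still be used.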
            lhs_sql, params = self.process_lhs(compiler, connection, self.lhs.lhs)
            rhs_sql, _ = self.process_rhs(compiler, connection)
            rhs_sql = self.get_direct_rhs_sql(connection, rhs_sql)
            start, finish = self.year_lookup_bounds(connection, self.rhs)
            params.extend(self.get_bound_params(start, finish))
            return '%s %s' % (lhs_sql, rhs_sql), params
        return super().as_sql(compiler, connection)

    def get_direct_rhs_sql(self, connection, rhs):
        return connection.operators[self.lookup_name] % rhs

    def get_bound_params(self, start, finish):
        raise NotImplementedError(
            'subclasses of YearLookup must provide a get_bound_params() method'
        )


class YearExact(YearLookup, Exact):
    def get_direct_rhs_sql(self, connection, rhs):
        return 'BETWEEN %s AND %s'

    def get_bound_params(self, start, finish):
        return (start, finish)


class YearGt(YearLookup, GreaterThan):
    def get_bound_params(self, start, finish):
        return (finish,)


class YearGte(YearLookup, GreaterThanOrEqual):
    def get_bound_params(self, start, finish):
        return (start,)


class YearLt(YearLookup, LessThan):
    def get_bound_params(self, start, finish):
        return (start,)


class YearLte(YearLookup, LessThanOrEqual):
    def get_bound_params(self, start, finish):
        return (finish,)
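

# ---------------------------------------------------------------------------
# Illustrative sketch (not part of Django): the registration machinery above
# is what makes user-defined lookups work. The class name `NotEqual` and the
# lookup name 'ne' are assumptions for demonstration only.
# ---------------------------------------------------------------------------
@Field.register_lookup
class NotEqual(Lookup):
    """Compile `field__ne=value` into `field <> %s` (sketch)."""
    lookup_name = 'ne'

    def as_sql(self, compiler, connection):
        # process_lhs()/process_rhs() are inherited from Lookup and return a
        # (sql, params) pair for each side of the comparison.
        lhs, lhs_params = self.process_lhs(compiler, connection)
        rhs, rhs_params = self.process_rhs(compiler, connection)
        return '%s <> %s' % (lhs, rhs), lhs_params + rhs_params


# Usage (hypothetical): Author.objects.filter(name__ne='bob')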


# django/db/models/fields/__init__.py
import collections.abc
import copy
import datetime
import decimal
import operator
import uuid
import warnings
from base64 import b64decode, b64encode
from functools import partialmethod, total_ordering

from django import forms
from django.apps import apps
from django.conf import settings
from django.core import checks, exceptions, validators
# When the _meta object was formalized, this exception was moved to
# django.core.exceptions. It is retained here for backwards compatibility
# purposes.
from django.core.exceptions import FieldDoesNotExist  # NOQA
from django.db import connection, connections, router
from django.db.models.constants import LOOKUP_SEP
from django.db.models.query_utils import DeferredAttribute, RegisterLookupMixin
from django.utils import timezone
from django.utils.datastructures import DictWrapper
from django.utils.dateparse import (
    parse_date, parse_datetime, parse_duration, parse_time,
)
from django.utils.duration import duration_microseconds, duration_string
from django.utils.functional import Promise, cached_property
from django.utils.ipv6 import clean_ipv6_address
from django.utils.itercompat import is_iterable
from django.utils.text import capfirst
from django.utils.translation import gettext_lazy as _

__all__ = [
    'AutoField', 'BLANK_CHOICE_DASH', 'BigAutoField', 'BigIntegerField',
    'BinaryField', 'BooleanField', 'CharField', 'CommaSeparatedIntegerField',
    'DateField', 'DateTimeField', 'DecimalField', 'DurationField',
    'EmailField', 'Empty', 'Field', 'FieldDoesNotExist', 'FilePathField',
    'FloatField', 'GenericIPAddressField', 'IPAddressField', 'IntegerField',
    'NOT_PROVIDED', 'NullBooleanField', 'PositiveIntegerField',
    'PositiveSmallIntegerField', 'SlugField', 'SmallAutoField',
    'SmallIntegerField', 'TextField', 'TimeField', 'URLField', 'UUIDField',
]


class Empty:
    pass


class NOT_PROVIDED:
    pass


# The values to use for "blank" in SelectFields. Will be appended to the start
# of most "choices" lists.
BLANK_CHOICE_DASH = [("", "---------")]


def _load_field(app_label, model_name, field_name):
    return apps.get_model(app_label, model_name)._meta.get_field(field_name)


# A guide to Field parameters:
#
#   * name:      The name of the field specified in the model.
#   * attname:   The attribute to use on the model object. This is the same as
#                "name", except in the case of ForeignKeys, where "_id" is
#                appended.
#   * db_column: The db_column specified in the model (or None).
#   * column:    The database column for this field. This is the same as
#                "attname", except if db_column is specified.
#
# Code that introspects values, or does other dynamic things, should use
# attname. For example, this gets the primary key value of object "obj":
#
#     getattr(obj, opts.pk.attname)


def _empty(of_cls):
    new = Empty()
    new.__class__ = of_cls
    return new


def return_None():
    return None


@total_ordering
class Field(RegisterLookupMixin):
    """Base class for all field types"""

    # Designates whether empty strings fundamentally are allowed at the
    # database level.
    empty_strings_allowed = True
    empty_values = list(validators.EMPTY_VALUES)

    # These track each time a Field instance is created. Used to retain order.
    # The auto_creation_counter is used for fields that Django implicitly
    # creates, creation_counter is used for all user-specified fields.
    creation_counter = 0
    auto_creation_counter = -1
    default_validators = []  # Default set of validators
    default_error_messages = {
        'invalid_choice': _('Value %(value)r is not a valid choice.'),
        'null': _('This field cannot be null.'),
        'blank': _('This field cannot be blank.'),
        'unique': _('%(model_name)s with this %(field_label)s '
                    'already exists.'),
        # Translators: The 'lookup_type' is one of 'date', 'year' or 'month'.
        # Eg: "Title must be unique for pub_date year"
        'unique_for_date': _("%(field_label)s must be unique for "
                             "%(date_field_label)s %(lookup_type)s."),
    }
    system_check_deprecated_details = None
    system_check_removed_details = None

    # Field flags
    hidden = False

    many_to_many = None
    many_to_one = None
    one_to_many = None
    one_to_one = None
    related_model = None

    descriptor_class = DeferredAttribute

    # Generic field type description, usually overridden by subclasses
    def _description(self):
        return _('Field of type: %(field_type)s') % {
            'field_type': self.__class__.__name__
        }
    description = property(_description)

    def __init__(self, verbose_name=None, name=None, primary_key=False,
                 max_length=None, unique=False, blank=False, null=False,
                 db_index=False, rel=None, default=NOT_PROVIDED, editable=True,
                 serialize=True, unique_for_date=None, unique_for_month=None,
                 unique_for_year=None, choices=None, help_text='',
                 db_column=None, db_tablespace=None, auto_created=False,
                 validators=(), error_messages=None):
        self.name = name
        self.verbose_name = verbose_name  # May be set by set_attributes_from_name
        self._verbose_name = verbose_name  # Store original for deconstruction
        self.primary_key = primary_key
        self.max_length, self._unique = max_length, unique
        self.blank, self.null = blank, null
        self.remote_field = rel
        self.is_relation = self.remote_field is not None
        self.default = default
        self.editable = editable
        self.serialize = serialize
        self.unique_for_date = unique_for_date
        self.unique_for_month = unique_for_month
        self.unique_for_year = unique_for_year
        if isinstance(choices, collections.abc.Iterator):
            choices = list(choices)
        self.choices = choices
        self.help_text = help_text
        self.db_index = db_index
        self.db_column = db_column
        self._db_tablespace = db_tablespace
        self.auto_created = auto_created

        # Adjust the appropriate creation counter, and save our local copy.
        if auto_created:
            self.creation_counter = Field.auto_creation_counter
            Field.auto_creation_counter -= 1
        else:
            self.creation_counter = Field.creation_counter
            Field.creation_counter += 1

        self._validators = list(validators)  # Store for deconstruction later

        messages = {}
        for c in reversed(self.__class__.__mro__):
            messages.update(getattr(c, 'default_error_messages', {}))
        messages.update(error_messages or {})
        self._error_messages = error_messages  # Store for deconstruction later
        self.error_messages = messages

    def __str__(self):
        """
        Return "app_label.model_label.field_name" for fields attached to
        models.
""" if not hasattr(self, 'model'): return super().__str__() model = self.model app = model._meta.app_label return '%s.%s.%s' % (app, model._meta.object_name, self.name) def __repr__(self): """Display the module, class, and name of the field.""" path = '%s.%s' % (self.__class__.__module__, self.__class__.__qualname__) name = getattr(self, 'name', None) if name is not None: return '<%s: %s>' % (path, name) return '<%s>' % path def check(self, **kwargs): return [ *self._check_field_name(), *self._check_choices(), *self._check_db_index(), *self._check_null_allowed_for_primary_keys(), *self._check_backend_specific_checks(**kwargs), *self._check_validators(), *self._check_deprecation_details(), ] def _check_field_name(self): """ Check if field name is valid, i.e. 1) does not end with an underscore, 2) does not contain "__" and 3) is not "pk". """ if self.name.endswith('_'): return [ checks.Error( 'Field names must not end with an underscore.', obj=self, id='fields.E001', ) ] elif LOOKUP_SEP in self.name: return [ checks.Error( 'Field names must not contain "%s".' % (LOOKUP_SEP,), obj=self, id='fields.E002', ) ] elif self.name == 'pk': return [ checks.Error( "'pk' is a reserved word that cannot be used as a field name.", obj=self, id='fields.E003', ) ] else: return [] def _check_choices(self): if not self.choices: return [] def is_value(value, accept_promise=True): return isinstance(value, (str, Promise) if accept_promise else str) or not is_iterable(value) if is_value(self.choices, accept_promise=False): return [ checks.Error( "'choices' must be an iterable (e.g., a list or tuple).", obj=self, id='fields.E004', ) ] # Expect [group_name, [value, display]] for choices_group in self.choices: try: group_name, group_choices = choices_group except (TypeError, ValueError): # Containing non-pairs break try: if not all( is_value(value) and is_value(human_name) for value, human_name in group_choices ): break except (TypeError, ValueError): # No groups, choices in the form [value, display] value, human_name = group_name, group_choices if not is_value(value) or not is_value(human_name): break # Special case: choices=['ab'] if isinstance(choices_group, str): break else: return [] return [ checks.Error( "'choices' must be an iterable containing " "(actual value, human readable name) tuples.", obj=self, id='fields.E005', ) ] def _check_db_index(self): if self.db_index not in (None, True, False): return [ checks.Error( "'db_index' must be None, True or False.", obj=self, id='fields.E006', ) ] else: return [] def _check_null_allowed_for_primary_keys(self): if (self.primary_key and self.null and not connection.features.interprets_empty_strings_as_nulls): # We cannot reliably check this for backends like Oracle which # consider NULL and '' to be equal (and thus set up # character-based fields a little differently). 
            return [
                checks.Error(
                    'Primary keys must not have null=True.',
                    hint=('Set null=False on the field, or '
                          'remove primary_key=True argument.'),
                    obj=self,
                    id='fields.E007',
                )
            ]
        else:
            return []

    def _check_backend_specific_checks(self, **kwargs):
        app_label = self.model._meta.app_label
        for db in connections:
            if router.allow_migrate(db, app_label, model_name=self.model._meta.model_name):
                return connections[db].validation.check_field(self, **kwargs)
        return []

    def _check_validators(self):
        errors = []
        for i, validator in enumerate(self.validators):
            if not callable(validator):
                errors.append(
                    checks.Error(
                        "All 'validators' must be callable.",
                        hint=(
                            "validators[{i}] ({repr}) isn't a function or "
                            "instance of a validator class.".format(
                                i=i, repr=repr(validator),
                            )
                        ),
                        obj=self,
                        id='fields.E008',
                    )
                )
        return errors

    def _check_deprecation_details(self):
        if self.system_check_removed_details is not None:
            return [
                checks.Error(
                    self.system_check_removed_details.get(
                        'msg',
                        '%s has been removed except for support in historical '
                        'migrations.' % self.__class__.__name__
                    ),
                    hint=self.system_check_removed_details.get('hint'),
                    obj=self,
                    id=self.system_check_removed_details.get('id', 'fields.EXXX'),
                )
            ]
        elif self.system_check_deprecated_details is not None:
            return [
                checks.Warning(
                    self.system_check_deprecated_details.get(
                        'msg',
                        '%s has been deprecated.' % self.__class__.__name__
                    ),
                    hint=self.system_check_deprecated_details.get('hint'),
                    obj=self,
                    id=self.system_check_deprecated_details.get('id', 'fields.WXXX'),
                )
            ]
        return []

    def get_col(self, alias, output_field=None):
        if output_field is None:
            output_field = self
        if alias != self.model._meta.db_table or output_field != self:
            from django.db.models.expressions import Col
            return Col(alias, self, output_field)
        else:
            return self.cached_col

    @cached_property
    def cached_col(self):
        from django.db.models.expressions import Col
        return Col(self.model._meta.db_table, self)

    def select_format(self, compiler, sql, params):
        """
        Custom format for select clauses. For example, GIS columns need to be
        selected as AsText(table.col) on MySQL as the table.col data can't be
        used by Django.
        """
        return sql, params

    def deconstruct(self):
        """
        Return enough information to recreate the field as a 4-tuple:

         * The name of the field on the model, if contribute_to_class() has
           been run.
         * The import path of the field, including the class, e.g.
           django.db.models.IntegerField. This should be the most portable
           version, so less specific may be better.
         * A list of positional arguments.
         * A dict of keyword arguments.

        Note that the positional or keyword arguments must contain values of
        the following types (including inner values of collection types):

         * None, bool, str, int, float, complex, set, frozenset, list, tuple,
           dict
         * UUID
         * datetime.datetime (naive), datetime.date
         * top-level classes, top-level functions - will be referenced by their
           full import path
         * Storage instances - these have their own deconstruct() method

        This is because the values here must be serialized into a text format
        (possibly new Python code, possibly JSON) and these are the only types
        with encoding handlers defined.

        There's no need to return the exact way the field was instantiated
        this time, just ensure that the resulting field is the same - prefer
        keyword arguments over positional ones, and omit parameters with their
        default values.
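
        For example (illustrative), an unbound field deconstructs roughly as:

            CharField(max_length=100).deconstruct()
            # -> (None, 'django.db.models.CharField', [], {'max_length': 100})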
""" # Short-form way of fetching all the default parameters keywords = {} possibles = { "verbose_name": None, "primary_key": False, "max_length": None, "unique": False, "blank": False, "null": False, "db_index": False, "default": NOT_PROVIDED, "editable": True, "serialize": True, "unique_for_date": None, "unique_for_month": None, "unique_for_year": None, "choices": None, "help_text": '', "db_column": None, "db_tablespace": None, "auto_created": False, "validators": [], "error_messages": None, } attr_overrides = { "unique": "_unique", "error_messages": "_error_messages", "validators": "_validators", "verbose_name": "_verbose_name", "db_tablespace": "_db_tablespace", } equals_comparison = {"choices", "validators"} for name, default in possibles.items(): value = getattr(self, attr_overrides.get(name, name)) # Unroll anything iterable for choices into a concrete list if name == "choices" and isinstance(value, collections.abc.Iterable): value = list(value) # Do correct kind of comparison if name in equals_comparison: if value != default: keywords[name] = value else: if value is not default: keywords[name] = value # Work out path - we shorten it for known Django core fields path = "%s.%s" % (self.__class__.__module__, self.__class__.__qualname__) if path.startswith("django.db.models.fields.related"): path = path.replace("django.db.models.fields.related", "django.db.models") elif path.startswith("django.db.models.fields.files"): path = path.replace("django.db.models.fields.files", "django.db.models") elif path.startswith("django.db.models.fields.proxy"): path = path.replace("django.db.models.fields.proxy", "django.db.models") elif path.startswith("django.db.models.fields"): path = path.replace("django.db.models.fields", "django.db.models") # Return basic info - other fields should override this. return (self.name, path, [], keywords) def clone(self): """ Uses deconstruct() to clone a new copy of this Field. Will not preserve any class attachments/attribute names. """ name, path, args, kwargs = self.deconstruct() return self.__class__(*args, **kwargs) def __eq__(self, other): # Needed for @total_ordering if isinstance(other, Field): return self.creation_counter == other.creation_counter return NotImplemented def __lt__(self, other): # This is needed because bisect does not take a comparison function. if isinstance(other, Field): return self.creation_counter < other.creation_counter return NotImplemented def __hash__(self): return hash(self.creation_counter) def __deepcopy__(self, memodict): # We don't have to deepcopy very much here, since most things are not # intended to be altered after initial creation. obj = copy.copy(self) if self.remote_field: obj.remote_field = copy.copy(self.remote_field) if hasattr(self.remote_field, 'field') and self.remote_field.field is self: obj.remote_field.field = obj memodict[id(self)] = obj return obj def __copy__(self): # We need to avoid hitting __reduce__, so define this # slightly weird copy construct. obj = Empty() obj.__class__ = self.__class__ obj.__dict__ = self.__dict__.copy() return obj def __reduce__(self): """ Pickling should return the model._meta.fields instance of the field, not a new copy of that field. So, use the app registry to load the model and then the field back. """ if not hasattr(self, 'model'): # Fields are sometimes used without attaching them to models (for # example in aggregation). In this case give back a plain field # instance. 
            # The code below will create a new empty instance of class
            # self.__class__, then update its dict with self.__dict__ values -
            # so, this is very close to normal pickle.
            state = self.__dict__.copy()
            # The _get_default cached_property can't be pickled due to lambda
            # usage.
            state.pop('_get_default', None)
            return _empty, (self.__class__,), state
        return _load_field, (self.model._meta.app_label, self.model._meta.object_name,
                             self.name)

    def get_pk_value_on_save(self, instance):
        """
        Hook to generate new PK values on save. This method is called when
        saving instances with no primary key value set. If this method returns
        something else than None, then the returned value is used when saving
        the new instance.
        """
        if self.default:
            return self.get_default()
        return None

    def to_python(self, value):
        """
        Convert the input value into the expected Python data type, raising
        django.core.exceptions.ValidationError if the data can't be converted.
        Return the converted value. Subclasses should override this.
        """
        return value

    @cached_property
    def validators(self):
        """
        Some validators can't be created at field initialization time.
        This method provides a way to delay their creation until required.
        """
        return [*self.default_validators, *self._validators]

    def run_validators(self, value):
        if value in self.empty_values:
            return

        errors = []
        for v in self.validators:
            try:
                v(value)
            except exceptions.ValidationError as e:
                if hasattr(e, 'code') and e.code in self.error_messages:
                    e.message = self.error_messages[e.code]
                errors.extend(e.error_list)

        if errors:
            raise exceptions.ValidationError(errors)

    def validate(self, value, model_instance):
        """
        Validate value and raise ValidationError if necessary. Subclasses
        should override this to provide validation logic.
        """
        if not self.editable:
            # Skip validation for non-editable fields.
            return

        if self.choices is not None and value not in self.empty_values:
            for option_key, option_value in self.choices:
                if isinstance(option_value, (list, tuple)):
                    # This is an optgroup, so look inside the group for
                    # options.
                    for optgroup_key, optgroup_value in option_value:
                        if value == optgroup_key:
                            return
                elif value == option_key:
                    return
            raise exceptions.ValidationError(
                self.error_messages['invalid_choice'],
                code='invalid_choice',
                params={'value': value},
            )

        if value is None and not self.null:
            raise exceptions.ValidationError(self.error_messages['null'], code='null')

        if not self.blank and value in self.empty_values:
            raise exceptions.ValidationError(self.error_messages['blank'], code='blank')

    def clean(self, value, model_instance):
        """
        Convert the value's type and run validation. Validation errors
        from to_python() and validate() are propagated. Return the correct
        value if no error is raised.
        """
        value = self.to_python(value)
        self.validate(value, model_instance)
        self.run_validators(value)
        return value

    def db_type_parameters(self, connection):
        return DictWrapper(self.__dict__, connection.ops.quote_name, 'qn_')

    def db_check(self, connection):
        """
        Return the database column check constraint for this field, for the
        provided connection. Works the same way as db_type() for the case that
        get_internal_type() does not map to a preexisting model field.
        """
        data = self.db_type_parameters(connection)
        try:
            return connection.data_type_check_constraints[self.get_internal_type()] % data
        except KeyError:
            return None

    def db_type(self, connection):
        """
        Return the database column data type for this field, for the provided
        connection.
""" # The default implementation of this method looks at the # backend-specific data_types dictionary, looking up the field by its # "internal type". # # A Field class can implement the get_internal_type() method to specify # which *preexisting* Django Field class it's most similar to -- i.e., # a custom field might be represented by a TEXT column type, which is # the same as the TextField Django field type, which means the custom # field's get_internal_type() returns 'TextField'. # # But the limitation of the get_internal_type() / data_types approach # is that it cannot handle database column types that aren't already # mapped to one of the built-in Django field types. In this case, you # can implement db_type() instead of get_internal_type() to specify # exactly which wacky database column type you want to use. data = self.db_type_parameters(connection) try: return connection.data_types[self.get_internal_type()] % data except KeyError: return None def rel_db_type(self, connection): """ Return the data type that a related field pointing to this field should use. For example, this method is called by ForeignKey and OneToOneField to determine its data type. """ return self.db_type(connection) def cast_db_type(self, connection): """Return the data type to use in the Cast() function.""" db_type = connection.ops.cast_data_types.get(self.get_internal_type()) if db_type: return db_type % self.db_type_parameters(connection) return self.db_type(connection) def db_parameters(self, connection): """ Extension of db_type(), providing a range of different return values (type, checks). This will look at db_type(), allowing custom model fields to override it. """ type_string = self.db_type(connection) check_string = self.db_check(connection) return { "type": type_string, "check": check_string, } def db_type_suffix(self, connection): return connection.data_types_suffix.get(self.get_internal_type()) def get_db_converters(self, connection): if hasattr(self, 'from_db_value'): return [self.from_db_value] return [] @property def unique(self): return self._unique or self.primary_key @property def db_tablespace(self): return self._db_tablespace or settings.DEFAULT_INDEX_TABLESPACE def set_attributes_from_name(self, name): self.name = self.name or name self.attname, self.column = self.get_attname_column() self.concrete = self.column is not None if self.verbose_name is None and self.name: self.verbose_name = self.name.replace('_', ' ') def contribute_to_class(self, cls, name, private_only=False): """ Register the field with the model class it belongs to. If private_only is True, create a separate instance of this field for every subclass of cls, even if cls is not an abstract model. """ self.set_attributes_from_name(name) self.model = cls cls._meta.add_field(self, private=private_only) if self.column: # Don't override classmethods with the descriptor. This means that # if you have a classmethod and a field with the same name, then # such fields can't be deferred (we don't have a check for this). if not getattr(cls, self.attname, None): setattr(cls, self.attname, self.descriptor_class(self)) if self.choices is not None: setattr(cls, 'get_%s_display' % self.name, partialmethod(cls._get_FIELD_display, field=self)) def get_filter_kwargs_for_object(self, obj): """ Return a dict that when passed as kwargs to self.model.filter(), would yield all instances having the same value for this field as obj has. 
""" return {self.name: getattr(obj, self.attname)} def get_attname(self): return self.name def get_attname_column(self): attname = self.get_attname() column = self.db_column or attname return attname, column def get_internal_type(self): return self.__class__.__name__ def pre_save(self, model_instance, add): """Return field's value just before saving.""" return getattr(model_instance, self.attname) def get_prep_value(self, value): """Perform preliminary non-db specific value checks and conversions.""" if isinstance(value, Promise): value = value._proxy____cast() return value def get_db_prep_value(self, value, connection, prepared=False): """ Return field's value prepared for interacting with the database backend. Used by the default implementations of get_db_prep_save(). """ if not prepared: value = self.get_prep_value(value) return value def get_db_prep_save(self, value, connection): """Return field's value prepared for saving into a database.""" return self.get_db_prep_value(value, connection=connection, prepared=False) def has_default(self): """Return a boolean of whether this field has a default value.""" return self.default is not NOT_PROVIDED def get_default(self): """Return the default value for this field.""" return self._get_default() @cached_property def _get_default(self): if self.has_default(): if callable(self.default): return self.default return lambda: self.default if not self.empty_strings_allowed or self.null and not connection.features.interprets_empty_strings_as_nulls: return return_None return str # return empty string def get_choices(self, include_blank=True, blank_choice=BLANK_CHOICE_DASH, limit_choices_to=None, ordering=()): """ Return choices with a default blank choices included, for use as <select> choices for this field. """ if self.choices is not None: choices = list(self.choices) if include_blank: blank_defined = any(choice in ('', None) for choice, _ in self.flatchoices) if not blank_defined: choices = blank_choice + choices return choices rel_model = self.remote_field.model limit_choices_to = limit_choices_to or self.get_limit_choices_to() choice_func = operator.attrgetter( self.remote_field.get_related_field().attname if hasattr(self.remote_field, 'get_related_field') else 'pk' ) return (blank_choice if include_blank else []) + [ (choice_func(x), str(x)) for x in rel_model._default_manager.complex_filter(limit_choices_to).order_by(*ordering) ] def value_to_string(self, obj): """ Return a string value of this field from the passed obj. This is used by the serialization framework. """ return str(self.value_from_object(obj)) def _get_flatchoices(self): """Flattened version of choices tuple.""" if self.choices is None: return [] flat = [] for choice, value in self.choices: if isinstance(value, (list, tuple)): flat.extend(value) else: flat.append((choice, value)) return flat flatchoices = property(_get_flatchoices) def save_form_data(self, instance, data): setattr(instance, self.name, data) def formfield(self, form_class=None, choices_form_class=None, **kwargs): """Return a django.forms.Field instance for this field.""" defaults = { 'required': not self.blank, 'label': capfirst(self.verbose_name), 'help_text': self.help_text, } if self.has_default(): if callable(self.default): defaults['initial'] = self.default defaults['show_hidden_initial'] = True else: defaults['initial'] = self.get_default() if self.choices is not None: # Fields with choices get special treatment. 
            include_blank = (self.blank or
                             not (self.has_default() or 'initial' in kwargs))
            defaults['choices'] = self.get_choices(include_blank=include_blank)
            defaults['coerce'] = self.to_python
            if self.null:
                defaults['empty_value'] = None
            if choices_form_class is not None:
                form_class = choices_form_class
            else:
                form_class = forms.TypedChoiceField
            # Many of the subclass-specific formfield arguments (min_value,
            # max_value) don't apply for choice fields, so be sure to only pass
            # the values that TypedChoiceField will understand.
            for k in list(kwargs):
                if k not in ('coerce', 'empty_value', 'choices', 'required',
                             'widget', 'label', 'initial', 'help_text',
                             'error_messages', 'show_hidden_initial', 'disabled'):
                    del kwargs[k]
        defaults.update(kwargs)
        if form_class is None:
            form_class = forms.CharField
        return form_class(**defaults)

    def value_from_object(self, obj):
        """Return the value of this field in the given model instance."""
        return getattr(obj, self.attname)


class AutoField(Field):
    description = _("Integer")

    empty_strings_allowed = False
    default_error_messages = {
        'invalid': _('“%(value)s” value must be an integer.'),
    }

    def __init__(self, *args, **kwargs):
        kwargs['blank'] = True
        super().__init__(*args, **kwargs)

    def check(self, **kwargs):
        return [
            *super().check(**kwargs),
            *self._check_primary_key(),
        ]

    def _check_primary_key(self):
        if not self.primary_key:
            return [
                checks.Error(
                    'AutoFields must set primary_key=True.',
                    obj=self,
                    id='fields.E100',
                ),
            ]
        else:
            return []

    def deconstruct(self):
        name, path, args, kwargs = super().deconstruct()
        del kwargs['blank']
        kwargs['primary_key'] = True
        return name, path, args, kwargs

    def get_internal_type(self):
        return "AutoField"

    def to_python(self, value):
        if value is None:
            return value
        try:
            return int(value)
        except (TypeError, ValueError):
            raise exceptions.ValidationError(
                self.error_messages['invalid'],
                code='invalid',
                params={'value': value},
            )

    def rel_db_type(self, connection):
        return IntegerField().db_type(connection=connection)

    def validate(self, value, model_instance):
        pass

    def get_db_prep_value(self, value, connection, prepared=False):
        if not prepared:
            value = self.get_prep_value(value)
            value = connection.ops.validate_autopk_value(value)
        return value

    def get_prep_value(self, value):
        from django.db.models.expressions import OuterRef
        value = super().get_prep_value(value)
        if value is None or isinstance(value, OuterRef):
            return value
        try:
            return int(value)
        except (TypeError, ValueError) as e:
            raise e.__class__(
                "Field '%s' expected a number but got %r." % (self.name, value),
            ) from e

    def contribute_to_class(self, cls, name, **kwargs):
        assert not cls._meta.auto_field, "Model %s can't have more than one AutoField." % cls._meta.label
        super().contribute_to_class(cls, name, **kwargs)
        cls._meta.auto_field = self

    def formfield(self, **kwargs):
        return None


class BigAutoField(AutoField):
    description = _("Big (8 byte) integer")

    def get_internal_type(self):
        return "BigAutoField"

    def rel_db_type(self, connection):
        return BigIntegerField().db_type(connection=connection)


class SmallAutoField(AutoField):
    description = _('Small integer')

    def get_internal_type(self):
        return 'SmallAutoField'

    def rel_db_type(self, connection):
        return SmallIntegerField().db_type(connection=connection)


class BooleanField(Field):
    empty_strings_allowed = False
    default_error_messages = {
        'invalid': _('“%(value)s” value must be either True or False.'),
        'invalid_nullable': _('“%(value)s” value must be either True, False, or None.'),
    }
    description = _("Boolean (Either True or False)")

    def get_internal_type(self):
        return "BooleanField"

    def to_python(self, value):
        if self.null and value in self.empty_values:
            return None
        if value in (True, False):
            # 1/0 are equal to True/False. bool() converts former to latter.
            return bool(value)
        if value in ('t', 'True', '1'):
            return True
        if value in ('f', 'False', '0'):
            return False
        raise exceptions.ValidationError(
            self.error_messages['invalid_nullable' if self.null else 'invalid'],
            code='invalid',
            params={'value': value},
        )

    def get_prep_value(self, value):
        value = super().get_prep_value(value)
        if value is None:
            return None
        return self.to_python(value)

    def formfield(self, **kwargs):
        if self.choices is not None:
            include_blank = not (self.has_default() or 'initial' in kwargs)
            defaults = {'choices': self.get_choices(include_blank=include_blank)}
        else:
            form_class = forms.NullBooleanField if self.null else forms.BooleanField
            # In HTML checkboxes, 'required' means "must be checked" which is
            # different from the choices case ("must select some value").
            # required=False allows unchecked checkboxes.
            defaults = {'form_class': form_class, 'required': False}
        return super().formfield(**{**defaults, **kwargs})


class CharField(Field):
    description = _("String (up to %(max_length)s)")

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.validators.append(validators.MaxLengthValidator(self.max_length))

    def check(self, **kwargs):
        return [
            *super().check(**kwargs),
            *self._check_max_length_attribute(**kwargs),
        ]

    def _check_max_length_attribute(self, **kwargs):
        if self.max_length is None:
            return [
                checks.Error(
                    "CharFields must define a 'max_length' attribute.",
                    obj=self,
                    id='fields.E120',
                )
            ]
        elif (not isinstance(self.max_length, int) or isinstance(self.max_length, bool) or
                self.max_length <= 0):
            return [
                checks.Error(
                    "'max_length' must be a positive integer.",
                    obj=self,
                    id='fields.E121',
                )
            ]
        else:
            return []

    def cast_db_type(self, connection):
        if self.max_length is None:
            return connection.ops.cast_char_field_without_max_length
        return super().cast_db_type(connection)

    def get_internal_type(self):
        return "CharField"

    def to_python(self, value):
        if isinstance(value, str) or value is None:
            return value
        return str(value)

    def get_prep_value(self, value):
        value = super().get_prep_value(value)
        return self.to_python(value)

    def formfield(self, **kwargs):
        # Passing max_length to forms.CharField means that the value's length
        # will be validated twice. This is considered acceptable since we want
        # the value in the form field (to pass into widget for example).
        defaults = {'max_length': self.max_length}
        # TODO: Handle multiple backends with different feature flags.
        if self.null and not connection.features.interprets_empty_strings_as_nulls:
            defaults['empty_value'] = None
        defaults.update(kwargs)
        return super().formfield(**defaults)


class CommaSeparatedIntegerField(CharField):
    default_validators = [validators.validate_comma_separated_integer_list]
    description = _("Comma-separated integers")
    system_check_removed_details = {
        'msg': (
            'CommaSeparatedIntegerField is removed except for support in '
            'historical migrations.'
        ),
        'hint': (
            'Use CharField(validators=[validate_comma_separated_integer_list]) '
            'instead.'
        ),
        'id': 'fields.E901',
    }


class DateTimeCheckMixin:

    def check(self, **kwargs):
        return [
            *super().check(**kwargs),
            *self._check_mutually_exclusive_options(),
            *self._check_fix_default_value(),
        ]

    def _check_mutually_exclusive_options(self):
        # auto_now, auto_now_add, and default are mutually exclusive
        # options. The use of more than one of these options together
        # will trigger an Error
        mutually_exclusive_options = [self.auto_now_add, self.auto_now, self.has_default()]
        enabled_options = [option not in (None, False) for option in mutually_exclusive_options].count(True)
        if enabled_options > 1:
            return [
                checks.Error(
                    "The options auto_now, auto_now_add, and default "
                    "are mutually exclusive. Only one of these options "
                    "may be present.",
                    obj=self,
                    id='fields.E160',
                )
            ]
        else:
            return []

    def _check_fix_default_value(self):
        return []


class DateField(DateTimeCheckMixin, Field):
    empty_strings_allowed = False
    default_error_messages = {
        'invalid': _('“%(value)s” value has an invalid date format. It must be '
                     'in YYYY-MM-DD format.'),
        'invalid_date': _('“%(value)s” value has the correct format (YYYY-MM-DD) '
                          'but it is an invalid date.'),
    }
    description = _("Date (without time)")

    def __init__(self, verbose_name=None, name=None, auto_now=False,
                 auto_now_add=False, **kwargs):
        self.auto_now, self.auto_now_add = auto_now, auto_now_add
        if auto_now or auto_now_add:
            kwargs['editable'] = False
            kwargs['blank'] = True
        super().__init__(verbose_name, name, **kwargs)

    def _check_fix_default_value(self):
        """
        Warn that using an actual date or datetime value is probably wrong;
        it's only evaluated on server startup.
        """
        if not self.has_default():
            return []

        now = timezone.now()
        if not timezone.is_naive(now):
            now = timezone.make_naive(now, timezone.utc)
        value = self.default
        if isinstance(value, datetime.datetime):
            if not timezone.is_naive(value):
                value = timezone.make_naive(value, timezone.utc)
            value = value.date()
        elif isinstance(value, datetime.date):
            # Nothing to do, as dates don't have tz information
            pass
        else:
            # No explicit date / datetime value -- no checks necessary
            return []
        offset = datetime.timedelta(days=1)
        lower = (now - offset).date()
        upper = (now + offset).date()
        if lower <= value <= upper:
            return [
                checks.Warning(
                    'Fixed default value provided.',
                    hint='It seems you set a fixed date / time / datetime '
                         'value as default for this field. This may not be '
                         'what you want. If you want to have the current date '
                         'as default, use `django.utils.timezone.now`',
                    obj=self,
                    id='fields.W161',
                )
            ]

        return []

    def deconstruct(self):
        name, path, args, kwargs = super().deconstruct()
        if self.auto_now:
            kwargs['auto_now'] = True
        if self.auto_now_add:
            kwargs['auto_now_add'] = True
        if self.auto_now or self.auto_now_add:
            del kwargs['editable']
            del kwargs['blank']
        return name, path, args, kwargs

    def get_internal_type(self):
        return "DateField"

    def to_python(self, value):
        if value is None:
            return value
        if isinstance(value, datetime.datetime):
            if settings.USE_TZ and timezone.is_aware(value):
                # Convert aware datetimes to the default time zone
                # before casting them to dates (#17742).
                default_timezone = timezone.get_default_timezone()
                value = timezone.make_naive(value, default_timezone)
            return value.date()
        if isinstance(value, datetime.date):
            return value

        try:
            parsed = parse_date(value)
            if parsed is not None:
                return parsed
        except ValueError:
            raise exceptions.ValidationError(
                self.error_messages['invalid_date'],
                code='invalid_date',
                params={'value': value},
            )

        raise exceptions.ValidationError(
            self.error_messages['invalid'],
            code='invalid',
            params={'value': value},
        )

    def pre_save(self, model_instance, add):
        if self.auto_now or (self.auto_now_add and add):
            value = datetime.date.today()
            setattr(model_instance, self.attname, value)
            return value
        else:
            return super().pre_save(model_instance, add)

    def contribute_to_class(self, cls, name, **kwargs):
        super().contribute_to_class(cls, name, **kwargs)
        if not self.null:
            setattr(
                cls, 'get_next_by_%s' % self.name,
                partialmethod(cls._get_next_or_previous_by_FIELD, field=self, is_next=True)
            )
            setattr(
                cls, 'get_previous_by_%s' % self.name,
                partialmethod(cls._get_next_or_previous_by_FIELD, field=self, is_next=False)
            )

    def get_prep_value(self, value):
        value = super().get_prep_value(value)
        return self.to_python(value)

    def get_db_prep_value(self, value, connection, prepared=False):
        # Casts dates into the format expected by the backend
        if not prepared:
            value = self.get_prep_value(value)
        return connection.ops.adapt_datefield_value(value)

    def value_to_string(self, obj):
        val = self.value_from_object(obj)
        return '' if val is None else val.isoformat()

    def formfield(self, **kwargs):
        return super().formfield(**{
            'form_class': forms.DateField,
            **kwargs,
        })


class DateTimeField(DateField):
    empty_strings_allowed = False
    default_error_messages = {
        'invalid': _('“%(value)s” value has an invalid format. It must be in '
                     'YYYY-MM-DD HH:MM[:ss[.uuuuuu]][TZ] format.'),
        'invalid_date': _("“%(value)s” value has the correct format "
                          "(YYYY-MM-DD) but it is an invalid date."),
        'invalid_datetime': _('“%(value)s” value has the correct format '
                              '(YYYY-MM-DD HH:MM[:ss[.uuuuuu]][TZ]) '
                              'but it is an invalid date/time.'),
    }
    description = _("Date (with time)")

    # __init__ is inherited from DateField

    def _check_fix_default_value(self):
        """
        Warn that using an actual date or datetime value is probably wrong;
        it's only evaluated on server startup.
""" if not self.has_default(): return [] now = timezone.now() if not timezone.is_naive(now): now = timezone.make_naive(now, timezone.utc) value = self.default if isinstance(value, datetime.datetime): second_offset = datetime.timedelta(seconds=10) lower = now - second_offset upper = now + second_offset if timezone.is_aware(value): value = timezone.make_naive(value, timezone.utc) elif isinstance(value, datetime.date): second_offset = datetime.timedelta(seconds=10) lower = now - second_offset lower = datetime.datetime(lower.year, lower.month, lower.day) upper = now + second_offset upper = datetime.datetime(upper.year, upper.month, upper.day) value = datetime.datetime(value.year, value.month, value.day) else: # No explicit date / datetime value -- no checks necessary return [] if lower <= value <= upper: return [ checks.Warning( 'Fixed default value provided.', hint='It seems you set a fixed date / time / datetime ' 'value as default for this field. This may not be ' 'what you want. If you want to have the current date ' 'as default, use `django.utils.timezone.now`', obj=self, id='fields.W161', ) ] return [] def get_internal_type(self): return "DateTimeField" def to_python(self, value): if value is None: return value if isinstance(value, datetime.datetime): return value if isinstance(value, datetime.date): value = datetime.datetime(value.year, value.month, value.day) if settings.USE_TZ: # For backwards compatibility, interpret naive datetimes in # local time. This won't work during DST change, but we can't # do much about it, so we let the exceptions percolate up the # call stack. warnings.warn("DateTimeField %s.%s received a naive datetime " "(%s) while time zone support is active." % (self.model.__name__, self.name, value), RuntimeWarning) default_timezone = timezone.get_default_timezone() value = timezone.make_aware(value, default_timezone) return value try: parsed = parse_datetime(value) if parsed is not None: return parsed except ValueError: raise exceptions.ValidationError( self.error_messages['invalid_datetime'], code='invalid_datetime', params={'value': value}, ) try: parsed = parse_date(value) if parsed is not None: return datetime.datetime(parsed.year, parsed.month, parsed.day) except ValueError: raise exceptions.ValidationError( self.error_messages['invalid_date'], code='invalid_date', params={'value': value}, ) raise exceptions.ValidationError( self.error_messages['invalid'], code='invalid', params={'value': value}, ) def pre_save(self, model_instance, add): if self.auto_now or (self.auto_now_add and add): value = timezone.now() setattr(model_instance, self.attname, value) return value else: return super().pre_save(model_instance, add) # contribute_to_class is inherited from DateField, it registers # get_next_by_FOO and get_prev_by_FOO def get_prep_value(self, value): value = super().get_prep_value(value) value = self.to_python(value) if value is not None and settings.USE_TZ and timezone.is_naive(value): # For backwards compatibility, interpret naive datetimes in local # time. This won't work during DST change, but we can't do much # about it, so we let the exceptions percolate up the call stack. try: name = '%s.%s' % (self.model.__name__, self.name) except AttributeError: name = '(unbound)' warnings.warn("DateTimeField %s received a naive datetime (%s)" " while time zone support is active." 
% (name, value), RuntimeWarning) default_timezone = timezone.get_default_timezone() value = timezone.make_aware(value, default_timezone) return value def get_db_prep_value(self, value, connection, prepared=False): # Casts datetimes into the format expected by the backend if not prepared: value = self.get_prep_value(value) return connection.ops.adapt_datetimefield_value(value) def value_to_string(self, obj): val = self.value_from_object(obj) return '' if val is None else val.isoformat() def formfield(self, **kwargs): return super().formfield(**{ 'form_class': forms.DateTimeField, **kwargs, }) class DecimalField(Field): empty_strings_allowed = False default_error_messages = { 'invalid': _('“%(value)s” value must be a decimal number.'), } description = _("Decimal number") def __init__(self, verbose_name=None, name=None, max_digits=None, decimal_places=None, **kwargs): self.max_digits, self.decimal_places = max_digits, decimal_places super().__init__(verbose_name, name, **kwargs) def check(self, **kwargs): errors = super().check(**kwargs) digits_errors = [ *self._check_decimal_places(), *self._check_max_digits(), ] if not digits_errors: errors.extend(self._check_decimal_places_and_max_digits(**kwargs)) else: errors.extend(digits_errors) return errors def _check_decimal_places(self): try: decimal_places = int(self.decimal_places) if decimal_places < 0: raise ValueError() except TypeError: return [ checks.Error( "DecimalFields must define a 'decimal_places' attribute.", obj=self, id='fields.E130', ) ] except ValueError: return [ checks.Error( "'decimal_places' must be a non-negative integer.", obj=self, id='fields.E131', ) ] else: return [] def _check_max_digits(self): try: max_digits = int(self.max_digits) if max_digits <= 0: raise ValueError() except TypeError: return [ checks.Error( "DecimalFields must define a 'max_digits' attribute.", obj=self, id='fields.E132', ) ] except ValueError: return [ checks.Error( "'max_digits' must be a positive integer.", obj=self, id='fields.E133', ) ] else: return [] def _check_decimal_places_and_max_digits(self, **kwargs): if int(self.decimal_places) > int(self.max_digits): return [ checks.Error( "'max_digits' must be greater or equal to 'decimal_places'.", obj=self, id='fields.E134', ) ] return [] @cached_property def validators(self): return super().validators + [ validators.DecimalValidator(self.max_digits, self.decimal_places) ] @cached_property def context(self): return decimal.Context(prec=self.max_digits) def deconstruct(self): name, path, args, kwargs = super().deconstruct() if self.max_digits is not None: kwargs['max_digits'] = self.max_digits if self.decimal_places is not None: kwargs['decimal_places'] = self.decimal_places return name, path, args, kwargs def get_internal_type(self): return "DecimalField" def to_python(self, value): if value is None: return value if isinstance(value, float): return self.context.create_decimal_from_float(value) try: return decimal.Decimal(value) except decimal.InvalidOperation: raise exceptions.ValidationError( self.error_messages['invalid'], code='invalid', params={'value': value}, ) def get_db_prep_save(self, value, connection): return connection.ops.adapt_decimalfield_value(self.to_python(value), self.max_digits, self.decimal_places) def get_prep_value(self, value): value = super().get_prep_value(value) return self.to_python(value) def formfield(self, **kwargs): return super().formfield(**{ 'max_digits': self.max_digits, 'decimal_places': self.decimal_places, 'form_class': forms.DecimalField, **kwargs, }) class 
DurationField(Field): """ Store timedelta objects. Use interval on PostgreSQL, INTERVAL DAY TO SECOND on Oracle, and bigint of microseconds on other databases. """ empty_strings_allowed = False default_error_messages = { 'invalid': _('“%(value)s” value has an invalid format. It must be in ' '[DD] [[HH:]MM:]ss[.uuuuuu] format.') } description = _("Duration") def get_internal_type(self): return "DurationField" def to_python(self, value): if value is None: return value if isinstance(value, datetime.timedelta): return value try: parsed = parse_duration(value) except ValueError: pass else: if parsed is not None: return parsed raise exceptions.ValidationError( self.error_messages['invalid'], code='invalid', params={'value': value}, ) def get_db_prep_value(self, value, connection, prepared=False): if connection.features.has_native_duration_field: return value if value is None: return None return duration_microseconds(value) def get_db_converters(self, connection): converters = [] if not connection.features.has_native_duration_field: converters.append(connection.ops.convert_durationfield_value) return converters + super().get_db_converters(connection) def value_to_string(self, obj): val = self.value_from_object(obj) return '' if val is None else duration_string(val) def formfield(self, **kwargs): return super().formfield(**{ 'form_class': forms.DurationField, **kwargs, }) class EmailField(CharField): default_validators = [validators.validate_email] description = _("Email address") def __init__(self, *args, **kwargs): # max_length=254 to be compliant with RFCs 3696 and 5321 kwargs.setdefault('max_length', 254) super().__init__(*args, **kwargs) def deconstruct(self): name, path, args, kwargs = super().deconstruct() # We do not exclude max_length if it matches default as we want to change # the default in future. return name, path, args, kwargs def formfield(self, **kwargs): # As with CharField, this will cause email validation to be performed # twice. 
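        # (once by the model field's default validators and once by
        # forms.EmailField's own validators).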
return super().formfield(**{ 'form_class': forms.EmailField, **kwargs, }) class FilePathField(Field): description = _("File path") def __init__(self, verbose_name=None, name=None, path='', match=None, recursive=False, allow_files=True, allow_folders=False, **kwargs): self.path, self.match, self.recursive = path, match, recursive self.allow_files, self.allow_folders = allow_files, allow_folders kwargs.setdefault('max_length', 100) super().__init__(verbose_name, name, **kwargs) def check(self, **kwargs): return [ *super().check(**kwargs), *self._check_allowing_files_or_folders(**kwargs), ] def _check_allowing_files_or_folders(self, **kwargs): if not self.allow_files and not self.allow_folders: return [ checks.Error( "FilePathFields must have either 'allow_files' or 'allow_folders' set to True.", obj=self, id='fields.E140', ) ] return [] def deconstruct(self): name, path, args, kwargs = super().deconstruct() if self.path != '': kwargs['path'] = self.path if self.match is not None: kwargs['match'] = self.match if self.recursive is not False: kwargs['recursive'] = self.recursive if self.allow_files is not True: kwargs['allow_files'] = self.allow_files if self.allow_folders is not False: kwargs['allow_folders'] = self.allow_folders if kwargs.get("max_length") == 100: del kwargs["max_length"] return name, path, args, kwargs def get_prep_value(self, value): value = super().get_prep_value(value) if value is None: return None return str(value) def formfield(self, **kwargs): return super().formfield(**{ 'path': self.path() if callable(self.path) else self.path, 'match': self.match, 'recursive': self.recursive, 'form_class': forms.FilePathField, 'allow_files': self.allow_files, 'allow_folders': self.allow_folders, **kwargs, }) def get_internal_type(self): return "FilePathField" class FloatField(Field): empty_strings_allowed = False default_error_messages = { 'invalid': _('“%(value)s” value must be a float.'), } description = _("Floating point number") def get_prep_value(self, value): value = super().get_prep_value(value) if value is None: return None try: return float(value) except (TypeError, ValueError) as e: raise e.__class__( "Field '%s' expected a number but got %r." % (self.name, value), ) from e def get_internal_type(self): return "FloatField" def to_python(self, value): if value is None: return value try: return float(value) except (TypeError, ValueError): raise exceptions.ValidationError( self.error_messages['invalid'], code='invalid', params={'value': value}, ) def formfield(self, **kwargs): return super().formfield(**{ 'form_class': forms.FloatField, **kwargs, }) class IntegerField(Field): empty_strings_allowed = False default_error_messages = { 'invalid': _('“%(value)s” value must be an integer.'), } description = _("Integer") def check(self, **kwargs): return [ *super().check(**kwargs), *self._check_max_length_warning(), ] def _check_max_length_warning(self): if self.max_length is not None: return [ checks.Warning( "'max_length' is ignored when used with %s." % self.__class__.__name__, hint="Remove 'max_length' from field", obj=self, id='fields.W122', ) ] return [] @cached_property def validators(self): # These validators can't be added at field initialization time since # they're based on values retrieved from `connection`. 
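        # For example (illustrative), a backend whose integer columns are
        # 32-bit reports the range (-2147483648, 2147483647) here, and the
        # matching Min/MaxValueValidators are appended below unless equally
        # strict validators are already present.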
validators_ = super().validators internal_type = self.get_internal_type() min_value, max_value = connection.ops.integer_field_range(internal_type) if min_value is not None and not any( ( isinstance(validator, validators.MinValueValidator) and ( validator.limit_value() if callable(validator.limit_value) else validator.limit_value ) >= min_value ) for validator in validators_ ): validators_.append(validators.MinValueValidator(min_value)) if max_value is not None and not any( ( isinstance(validator, validators.MaxValueValidator) and ( validator.limit_value() if callable(validator.limit_value) else validator.limit_value ) <= max_value ) for validator in validators_ ): validators_.append(validators.MaxValueValidator(max_value)) return validators_ def get_prep_value(self, value): value = super().get_prep_value(value) if value is None: return None try: return int(value) except (TypeError, ValueError) as e: raise e.__class__( "Field '%s' expected a number but got %r." % (self.name, value), ) from e def get_internal_type(self): return "IntegerField" def to_python(self, value): if value is None: return value try: return int(value) except (TypeError, ValueError): raise exceptions.ValidationError( self.error_messages['invalid'], code='invalid', params={'value': value}, ) def formfield(self, **kwargs): return super().formfield(**{ 'form_class': forms.IntegerField, **kwargs, }) class BigIntegerField(IntegerField): description = _("Big (8 byte) integer") MAX_BIGINT = 9223372036854775807 def get_internal_type(self): return "BigIntegerField" def formfield(self, **kwargs): return super().formfield(**{ 'min_value': -BigIntegerField.MAX_BIGINT - 1, 'max_value': BigIntegerField.MAX_BIGINT, **kwargs, }) class IPAddressField(Field): empty_strings_allowed = False description = _("IPv4 address") system_check_removed_details = { 'msg': ( 'IPAddressField has been removed except for support in ' 'historical migrations.' 
), 'hint': 'Use GenericIPAddressField instead.', 'id': 'fields.E900', } def __init__(self, *args, **kwargs): kwargs['max_length'] = 15 super().__init__(*args, **kwargs) def deconstruct(self): name, path, args, kwargs = super().deconstruct() del kwargs['max_length'] return name, path, args, kwargs def get_prep_value(self, value): value = super().get_prep_value(value) if value is None: return None return str(value) def get_internal_type(self): return "IPAddressField" class GenericIPAddressField(Field): empty_strings_allowed = False description = _("IP address") default_error_messages = {} def __init__(self, verbose_name=None, name=None, protocol='both', unpack_ipv4=False, *args, **kwargs): self.unpack_ipv4 = unpack_ipv4 self.protocol = protocol self.default_validators, invalid_error_message = \ validators.ip_address_validators(protocol, unpack_ipv4) self.default_error_messages['invalid'] = invalid_error_message kwargs['max_length'] = 39 super().__init__(verbose_name, name, *args, **kwargs) def check(self, **kwargs): return [ *super().check(**kwargs), *self._check_blank_and_null_values(**kwargs), ] def _check_blank_and_null_values(self, **kwargs): if not getattr(self, 'null', False) and getattr(self, 'blank', False): return [ checks.Error( 'GenericIPAddressFields cannot have blank=True if null=False, ' 'as blank values are stored as nulls.', obj=self, id='fields.E150', ) ] return [] def deconstruct(self): name, path, args, kwargs = super().deconstruct() if self.unpack_ipv4 is not False: kwargs['unpack_ipv4'] = self.unpack_ipv4 if self.protocol != "both": kwargs['protocol'] = self.protocol if kwargs.get("max_length") == 39: del kwargs['max_length'] return name, path, args, kwargs def get_internal_type(self): return "GenericIPAddressField" def to_python(self, value): if value is None: return None if not isinstance(value, str): value = str(value) value = value.strip() if ':' in value: return clean_ipv6_address(value, self.unpack_ipv4, self.error_messages['invalid']) return value def get_db_prep_value(self, value, connection, prepared=False): if not prepared: value = self.get_prep_value(value) return connection.ops.adapt_ipaddressfield_value(value) def get_prep_value(self, value): value = super().get_prep_value(value) if value is None: return None if value and ':' in value: try: return clean_ipv6_address(value, self.unpack_ipv4) except exceptions.ValidationError: pass return str(value) def formfield(self, **kwargs): return super().formfield(**{ 'protocol': self.protocol, 'form_class': forms.GenericIPAddressField, **kwargs, }) class NullBooleanField(BooleanField): default_error_messages = { 'invalid': _('“%(value)s” value must be either None, True or False.'), 'invalid_nullable': _('“%(value)s” value must be either None, True or False.'), } description = _("Boolean (Either True, False or None)") def __init__(self, *args, **kwargs): kwargs['null'] = True kwargs['blank'] = True super().__init__(*args, **kwargs) def deconstruct(self): name, path, args, kwargs = super().deconstruct() del kwargs['null'] del kwargs['blank'] return name, path, args, kwargs def get_internal_type(self): return "NullBooleanField" class PositiveIntegerRelDbTypeMixin: def rel_db_type(self, connection): """ Return the data type that a related field pointing to this field should use. In most cases, a foreign key pointing to a positive integer primary key will have an integer column data type but some databases (e.g. MySQL) have an unsigned integer type. 
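        For example, MySQL creates a PositiveIntegerField column as
        ``integer UNSIGNED``, so a foreign key referencing it must use the
        same unsigned type.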
In that case (related_fields_match_type=True), the primary key should return its db_type. """ if connection.features.related_fields_match_type: return self.db_type(connection) else: return IntegerField().db_type(connection=connection) class PositiveIntegerField(PositiveIntegerRelDbTypeMixin, IntegerField): description = _("Positive integer") def get_internal_type(self): return "PositiveIntegerField" def formfield(self, **kwargs): return super().formfield(**{ 'min_value': 0, **kwargs, }) class PositiveSmallIntegerField(PositiveIntegerRelDbTypeMixin, IntegerField): description = _("Positive small integer") def get_internal_type(self): return "PositiveSmallIntegerField" def formfield(self, **kwargs): return super().formfield(**{ 'min_value': 0, **kwargs, }) class SlugField(CharField): default_validators = [validators.validate_slug] description = _("Slug (up to %(max_length)s)") def __init__(self, *args, max_length=50, db_index=True, allow_unicode=False, **kwargs): self.allow_unicode = allow_unicode if self.allow_unicode: self.default_validators = [validators.validate_unicode_slug] super().__init__(*args, max_length=max_length, db_index=db_index, **kwargs) def deconstruct(self): name, path, args, kwargs = super().deconstruct() if kwargs.get("max_length") == 50: del kwargs['max_length'] if self.db_index is False: kwargs['db_index'] = False else: del kwargs['db_index'] if self.allow_unicode is not False: kwargs['allow_unicode'] = self.allow_unicode return name, path, args, kwargs def get_internal_type(self): return "SlugField" def formfield(self, **kwargs): return super().formfield(**{ 'form_class': forms.SlugField, 'allow_unicode': self.allow_unicode, **kwargs, }) class SmallIntegerField(IntegerField): description = _("Small integer") def get_internal_type(self): return "SmallIntegerField" class TextField(Field): description = _("Text") def get_internal_type(self): return "TextField" def to_python(self, value): if isinstance(value, str) or value is None: return value return str(value) def get_prep_value(self, value): value = super().get_prep_value(value) return self.to_python(value) def formfield(self, **kwargs): # Passing max_length to forms.CharField means that the value's length # will be validated twice. This is considered acceptable since we want # the value in the form field (to pass into widget for example). return super().formfield(**{ 'max_length': self.max_length, **({} if self.choices is not None else {'widget': forms.Textarea}), **kwargs, }) class TimeField(DateTimeCheckMixin, Field): empty_strings_allowed = False default_error_messages = { 'invalid': _('“%(value)s” value has an invalid format. It must be in ' 'HH:MM[:ss[.uuuuuu]] format.'), 'invalid_time': _('“%(value)s” value has the correct format ' '(HH:MM[:ss[.uuuuuu]]) but it is an invalid time.'), } description = _("Time") def __init__(self, verbose_name=None, name=None, auto_now=False, auto_now_add=False, **kwargs): self.auto_now, self.auto_now_add = auto_now, auto_now_add if auto_now or auto_now_add: kwargs['editable'] = False kwargs['blank'] = True super().__init__(verbose_name, name, **kwargs) def _check_fix_default_value(self): """ Warn that using an actual date or datetime value is probably wrong; it's only evaluated on server startup. 
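        For example (illustrative), ``default=datetime.datetime.now().time()``
        is computed once at startup and will trip this check; pass a callable
        instead so the value is computed per save.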
""" if not self.has_default(): return [] now = timezone.now() if not timezone.is_naive(now): now = timezone.make_naive(now, timezone.utc) value = self.default if isinstance(value, datetime.datetime): second_offset = datetime.timedelta(seconds=10) lower = now - second_offset upper = now + second_offset if timezone.is_aware(value): value = timezone.make_naive(value, timezone.utc) elif isinstance(value, datetime.time): second_offset = datetime.timedelta(seconds=10) lower = now - second_offset upper = now + second_offset value = datetime.datetime.combine(now.date(), value) if timezone.is_aware(value): value = timezone.make_naive(value, timezone.utc).time() else: # No explicit time / datetime value -- no checks necessary return [] if lower <= value <= upper: return [ checks.Warning( 'Fixed default value provided.', hint='It seems you set a fixed date / time / datetime ' 'value as default for this field. This may not be ' 'what you want. If you want to have the current date ' 'as default, use `django.utils.timezone.now`', obj=self, id='fields.W161', ) ] return [] def deconstruct(self): name, path, args, kwargs = super().deconstruct() if self.auto_now is not False: kwargs["auto_now"] = self.auto_now if self.auto_now_add is not False: kwargs["auto_now_add"] = self.auto_now_add if self.auto_now or self.auto_now_add: del kwargs['blank'] del kwargs['editable'] return name, path, args, kwargs def get_internal_type(self): return "TimeField" def to_python(self, value): if value is None: return None if isinstance(value, datetime.time): return value if isinstance(value, datetime.datetime): # Not usually a good idea to pass in a datetime here (it loses # information), but this can be a side-effect of interacting with a # database backend (e.g. Oracle), so we'll be accommodating. return value.time() try: parsed = parse_time(value) if parsed is not None: return parsed except ValueError: raise exceptions.ValidationError( self.error_messages['invalid_time'], code='invalid_time', params={'value': value}, ) raise exceptions.ValidationError( self.error_messages['invalid'], code='invalid', params={'value': value}, ) def pre_save(self, model_instance, add): if self.auto_now or (self.auto_now_add and add): value = datetime.datetime.now().time() setattr(model_instance, self.attname, value) return value else: return super().pre_save(model_instance, add) def get_prep_value(self, value): value = super().get_prep_value(value) return self.to_python(value) def get_db_prep_value(self, value, connection, prepared=False): # Casts times into the format expected by the backend if not prepared: value = self.get_prep_value(value) return connection.ops.adapt_timefield_value(value) def value_to_string(self, obj): val = self.value_from_object(obj) return '' if val is None else val.isoformat() def formfield(self, **kwargs): return super().formfield(**{ 'form_class': forms.TimeField, **kwargs, }) class URLField(CharField): default_validators = [validators.URLValidator()] description = _("URL") def __init__(self, verbose_name=None, name=None, **kwargs): kwargs.setdefault('max_length', 200) super().__init__(verbose_name, name, **kwargs) def deconstruct(self): name, path, args, kwargs = super().deconstruct() if kwargs.get("max_length") == 200: del kwargs['max_length'] return name, path, args, kwargs def formfield(self, **kwargs): # As with CharField, this will cause URL validation to be performed # twice. 
return super().formfield(**{ 'form_class': forms.URLField, **kwargs, }) class BinaryField(Field): description = _("Raw binary data") empty_values = [None, b''] def __init__(self, *args, **kwargs): kwargs.setdefault('editable', False) super().__init__(*args, **kwargs) if self.max_length is not None: self.validators.append(validators.MaxLengthValidator(self.max_length)) def check(self, **kwargs): return [*super().check(**kwargs), *self._check_str_default_value()] def _check_str_default_value(self): if self.has_default() and isinstance(self.default, str): return [ checks.Error( "BinaryField's default cannot be a string. Use bytes " "content instead.", obj=self, id='fields.E170', ) ] return [] def deconstruct(self): name, path, args, kwargs = super().deconstruct() if self.editable: kwargs['editable'] = True else: del kwargs['editable'] return name, path, args, kwargs def get_internal_type(self): return "BinaryField" def get_placeholder(self, value, compiler, connection): return connection.ops.binary_placeholder_sql(value) def get_default(self): if self.has_default() and not callable(self.default): return self.default default = super().get_default() if default == '': return b'' return default def get_db_prep_value(self, value, connection, prepared=False): value = super().get_db_prep_value(value, connection, prepared) if value is not None: return connection.Database.Binary(value) return value def value_to_string(self, obj): """Binary data is serialized as base64""" return b64encode(self.value_from_object(obj)).decode('ascii') def to_python(self, value): # If it's a string, it should be base64-encoded data if isinstance(value, str): return memoryview(b64decode(value.encode('ascii'))) return value class UUIDField(Field): default_error_messages = { 'invalid': _('“%(value)s” is not a valid UUID.'), } description = _('Universally unique identifier') empty_strings_allowed = False def __init__(self, verbose_name=None, **kwargs): kwargs['max_length'] = 32 super().__init__(verbose_name, **kwargs) def deconstruct(self): name, path, args, kwargs = super().deconstruct() del kwargs['max_length'] return name, path, args, kwargs def get_internal_type(self): return "UUIDField" def get_prep_value(self, value): value = super().get_prep_value(value) return self.to_python(value) def get_db_prep_value(self, value, connection, prepared=False): if value is None: return None if not isinstance(value, uuid.UUID): value = self.to_python(value) if connection.features.has_native_uuid_field: return value return value.hex def to_python(self, value): if value is not None and not isinstance(value, uuid.UUID): input_form = 'int' if isinstance(value, int) else 'hex' try: return uuid.UUID(**{input_form: value}) except (AttributeError, ValueError): raise exceptions.ValidationError( self.error_messages['invalid'], code='invalid', params={'value': value}, ) return value def formfield(self, **kwargs): return super().formfield(**{ 'form_class': forms.UUIDField, **kwargs, })
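# Usage sketch (illustrative only, not part of this module; assumes a
# hypothetical Ticket model):
#
#     class Ticket(models.Model):
#         id = models.UUIDField(primary_key=True, default=uuid.uuid4,
#                               editable=False)
#
# On backends without a native uuid column type, the value is stored as the
# 32-character value.hex string, which is why max_length is pinned to 32.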
from datetime import datetime from django.conf import settings from django.db.models.expressions import Func from django.db.models.fields import ( DateField, DateTimeField, DurationField, Field, IntegerField, TimeField, ) from django.db.models.lookups import ( Transform, YearExact, YearGt, YearGte, YearLt, YearLte, ) from django.utils import timezone class TimezoneMixin: tzinfo = None def get_tzname(self): # Timezone conversions must happen to the input datetime *before* # applying a function. 2015-12-31 23:00:00 -02:00 is stored in the # database as 2016-01-01 01:00:00 +00:00. Any results should be # based on the input datetime not the stored datetime. tzname = None if settings.USE_TZ: if self.tzinfo is None: tzname = timezone.get_current_timezone_name() else: tzname = timezone._get_timezone_name(self.tzinfo) return tzname class Extract(TimezoneMixin, Transform): lookup_name = None output_field = IntegerField() def __init__(self, expression, lookup_name=None, tzinfo=None, **extra): if self.lookup_name is None: self.lookup_name = lookup_name if self.lookup_name is None: raise ValueError('lookup_name must be provided') self.tzinfo = tzinfo super().__init__(expression, **extra) def as_sql(self, compiler, connection): sql, params = compiler.compile(self.lhs) lhs_output_field = self.lhs.output_field if isinstance(lhs_output_field, DateTimeField): tzname = self.get_tzname() sql = connection.ops.datetime_extract_sql(self.lookup_name, sql, tzname) elif isinstance(lhs_output_field, DateField): sql = connection.ops.date_extract_sql(self.lookup_name, sql) elif isinstance(lhs_output_field, TimeField): sql = connection.ops.time_extract_sql(self.lookup_name, sql) elif isinstance(lhs_output_field, DurationField): if not connection.features.has_native_duration_field: raise ValueError('Extract requires native DurationField database support.') sql = connection.ops.time_extract_sql(self.lookup_name, sql) else: # resolve_expression has already validated the output_field so this # assert should never be hit. assert False, "Tried to Extract from an invalid type." return sql, params def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False): copy = super().resolve_expression(query, allow_joins, reuse, summarize, for_save) field = copy.lhs.output_field if not isinstance(field, (DateField, DateTimeField, TimeField, DurationField)): raise ValueError( 'Extract input expression must be DateField, DateTimeField, ' 'TimeField, or DurationField.' ) # Passing dates to functions expecting datetimes is most likely a mistake. if type(field) == DateField and copy.lookup_name in ('hour', 'minute', 'second'): raise ValueError( "Cannot extract time component '%s' from DateField '%s'. " % (copy.lookup_name, field.name) ) if ( isinstance(field, DurationField) and copy.lookup_name in ('year', 'iso_year', 'month', 'week', 'week_day', 'quarter') ): raise ValueError( "Cannot extract component '%s' from DurationField '%s'." % (copy.lookup_name, field.name) ) return copy class ExtractYear(Extract): lookup_name = 'year' class ExtractIsoYear(Extract): """Return the ISO-8601 week-numbering year.""" lookup_name = 'iso_year' class ExtractMonth(Extract): lookup_name = 'month' class ExtractDay(Extract): lookup_name = 'day' class ExtractWeek(Extract): """ Return 1-52 or 53, based on ISO-8601, i.e., Monday is the first of the week. """ lookup_name = 'week' class ExtractWeekDay(Extract): """ Return Sunday=1 through Saturday=7. 
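    For example, a Wednesday yields 4.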
To replicate this in Python: (mydatetime.isoweekday() % 7) + 1 """ lookup_name = 'week_day' class ExtractQuarter(Extract): lookup_name = 'quarter' class ExtractHour(Extract): lookup_name = 'hour' class ExtractMinute(Extract): lookup_name = 'minute' class ExtractSecond(Extract): lookup_name = 'second' DateField.register_lookup(ExtractYear) DateField.register_lookup(ExtractMonth) DateField.register_lookup(ExtractDay) DateField.register_lookup(ExtractWeekDay) DateField.register_lookup(ExtractWeek) DateField.register_lookup(ExtractIsoYear) DateField.register_lookup(ExtractQuarter) TimeField.register_lookup(ExtractHour) TimeField.register_lookup(ExtractMinute) TimeField.register_lookup(ExtractSecond) DateTimeField.register_lookup(ExtractHour) DateTimeField.register_lookup(ExtractMinute) DateTimeField.register_lookup(ExtractSecond) ExtractYear.register_lookup(YearExact) ExtractYear.register_lookup(YearGt) ExtractYear.register_lookup(YearGte) ExtractYear.register_lookup(YearLt) ExtractYear.register_lookup(YearLte) ExtractIsoYear.register_lookup(YearExact) ExtractIsoYear.register_lookup(YearGt) ExtractIsoYear.register_lookup(YearGte) ExtractIsoYear.register_lookup(YearLt) ExtractIsoYear.register_lookup(YearLte) class Now(Func): template = 'CURRENT_TIMESTAMP' output_field = DateTimeField() def as_postgresql(self, compiler, connection, **extra_context): # PostgreSQL's CURRENT_TIMESTAMP means "the time at the start of the # transaction". Use STATEMENT_TIMESTAMP to be cross-compatible with # other databases. return self.as_sql(compiler, connection, template='STATEMENT_TIMESTAMP()', **extra_context) class TruncBase(TimezoneMixin, Transform): kind = None tzinfo = None def __init__(self, expression, output_field=None, tzinfo=None, is_dst=None, **extra): self.tzinfo = tzinfo self.is_dst = is_dst super().__init__(expression, output_field=output_field, **extra) def as_sql(self, compiler, connection): inner_sql, inner_params = compiler.compile(self.lhs) if isinstance(self.output_field, DateTimeField): tzname = self.get_tzname() sql = connection.ops.datetime_trunc_sql(self.kind, inner_sql, tzname) elif isinstance(self.output_field, DateField): sql = connection.ops.date_trunc_sql(self.kind, inner_sql) elif isinstance(self.output_field, TimeField): sql = connection.ops.time_trunc_sql(self.kind, inner_sql) else: raise ValueError('Trunc only valid on DateField, TimeField, or DateTimeField.') return sql, inner_params def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False): copy = super().resolve_expression(query, allow_joins, reuse, summarize, for_save) field = copy.lhs.output_field # DateTimeField is a subclass of DateField so this works for both. assert isinstance(field, (DateField, TimeField)), ( "%r isn't a DateField, TimeField, or DateTimeField." % field.name ) # If self.output_field was None, then accessing the field will trigger # the resolver to assign it to self.lhs.output_field. if not isinstance(copy.output_field, (DateField, DateTimeField, TimeField)): raise ValueError('output_field must be either DateField, TimeField, or DateTimeField') # Passing dates or times to functions expecting datetimes is most # likely a mistake. 
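        # For example (illustrative field names), Trunc('birthday', 'hour') on
        # a DateField or Trunc('meeting_time', 'day') on a TimeField both
        # raise below.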
class_output_field = self.__class__.output_field if isinstance(self.__class__.output_field, Field) else None output_field = class_output_field or copy.output_field has_explicit_output_field = class_output_field or field.__class__ is not copy.output_field.__class__ if type(field) == DateField and ( isinstance(output_field, DateTimeField) or copy.kind in ('hour', 'minute', 'second', 'time')): raise ValueError("Cannot truncate DateField '%s' to %s. " % ( field.name, output_field.__class__.__name__ if has_explicit_output_field else 'DateTimeField' )) elif isinstance(field, TimeField) and ( isinstance(output_field, DateTimeField) or copy.kind in ('year', 'quarter', 'month', 'week', 'day', 'date')): raise ValueError("Cannot truncate TimeField '%s' to %s. " % ( field.name, output_field.__class__.__name__ if has_explicit_output_field else 'DateTimeField' )) return copy def convert_value(self, value, expression, connection): if isinstance(self.output_field, DateTimeField): if not settings.USE_TZ: pass elif value is not None: value = value.replace(tzinfo=None) value = timezone.make_aware(value, self.tzinfo, is_dst=self.is_dst) elif not connection.features.has_zoneinfo_database: raise ValueError( 'Database returned an invalid datetime value. Are time ' 'zone definitions for your database installed?' ) elif isinstance(value, datetime): if value is None: pass elif isinstance(self.output_field, DateField): value = value.date() elif isinstance(self.output_field, TimeField): value = value.time() return value class Trunc(TruncBase): def __init__(self, expression, kind, output_field=None, tzinfo=None, is_dst=None, **extra): self.kind = kind super().__init__( expression, output_field=output_field, tzinfo=tzinfo, is_dst=is_dst, **extra ) class TruncYear(TruncBase): kind = 'year' class TruncQuarter(TruncBase): kind = 'quarter' class TruncMonth(TruncBase): kind = 'month' class TruncWeek(TruncBase): """Truncate to midnight on the Monday of the week.""" kind = 'week' class TruncDay(TruncBase): kind = 'day' class TruncDate(TruncBase): kind = 'date' lookup_name = 'date' output_field = DateField() def as_sql(self, compiler, connection): # Cast to date rather than truncate to date. lhs, lhs_params = compiler.compile(self.lhs) tzname = timezone.get_current_timezone_name() if settings.USE_TZ else None sql = connection.ops.datetime_cast_date_sql(lhs, tzname) return sql, lhs_params class TruncTime(TruncBase): kind = 'time' lookup_name = 'time' output_field = TimeField() def as_sql(self, compiler, connection): # Cast to time rather than truncate to time. lhs, lhs_params = compiler.compile(self.lhs) tzname = timezone.get_current_timezone_name() if settings.USE_TZ else None sql = connection.ops.datetime_cast_time_sql(lhs, tzname) return sql, lhs_params class TruncHour(TruncBase): kind = 'hour' class TruncMinute(TruncBase): kind = 'minute' class TruncSecond(TruncBase): kind = 'second' DateTimeField.register_lookup(TruncDate) DateTimeField.register_lookup(TruncTime)
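# Usage sketch (illustrative; assumes a hypothetical Experiment model with a
# DateTimeField named 'start'):
#
#     from django.db.models.functions import ExtractYear, TruncMonth
#
#     Experiment.objects.annotate(year=ExtractYear('start'))
#     Experiment.objects.annotate(month=TruncMonth('start')).values('month')
#
# Extract* transforms return integers, while Trunc* transforms return the
# truncated date/datetime value itself.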
""" Create SQL statements for QuerySets. The code in here encapsulates all of the SQL construction so that QuerySets themselves do not have to (and could be backed by things other than SQL databases). The abstraction barrier only works one way: this module has to know all about the internals of models in order to get the information it needs. """ import difflib import functools import inspect import sys import warnings from collections import Counter, namedtuple from collections.abc import Iterator, Mapping from itertools import chain, count, product from string import ascii_uppercase from django.core.exceptions import ( EmptyResultSet, FieldDoesNotExist, FieldError, ) from django.db import DEFAULT_DB_ALIAS, NotSupportedError, connections from django.db.models.aggregates import Count from django.db.models.constants import LOOKUP_SEP from django.db.models.expressions import ( BaseExpression, Col, F, OuterRef, Ref, SimpleCol, ) from django.db.models.fields import Field from django.db.models.fields.related_lookups import MultiColSource from django.db.models.lookups import Lookup from django.db.models.query_utils import ( Q, check_rel_lookup_compatibility, refs_expression, ) from django.db.models.sql.constants import ( INNER, LOUTER, ORDER_DIR, ORDER_PATTERN, SINGLE, ) from django.db.models.sql.datastructures import ( BaseTable, Empty, Join, MultiJoin, ) from django.db.models.sql.where import ( AND, OR, ExtraWhere, NothingNode, WhereNode, ) from django.utils.deprecation import RemovedInDjango40Warning from django.utils.functional import cached_property from django.utils.tree import Node __all__ = ['Query', 'RawQuery'] def get_field_names_from_opts(opts): return set(chain.from_iterable( (f.name, f.attname) if f.concrete else (f.name,) for f in opts.get_fields() )) def get_children_from_q(q): for child in q.children: if isinstance(child, Node): yield from get_children_from_q(child) else: yield child JoinInfo = namedtuple( 'JoinInfo', ('final_field', 'targets', 'opts', 'joins', 'path', 'transform_function') ) def _get_col(target, field, alias, simple_col): if simple_col: return SimpleCol(target, field) return target.get_col(alias, field) class RawQuery: """A single raw SQL query.""" def __init__(self, sql, using, params=None): self.params = params or () self.sql = sql self.using = using self.cursor = None # Mirror some properties of a normal query so that # the compiler can be used to process results. self.low_mark, self.high_mark = 0, None # Used for offset/limit self.extra_select = {} self.annotation_select = {} def chain(self, using): return self.clone(using) def clone(self, using): return RawQuery(self.sql, using, params=self.params) def get_columns(self): if self.cursor is None: self._execute_query() converter = connections[self.using].introspection.identifier_converter return [converter(column_meta[0]) for column_meta in self.cursor.description] def __iter__(self): # Always execute a new query for a new iterator. # This could be optimized with a cache at the expense of RAM. self._execute_query() if not connections[self.using].features.can_use_chunked_reads: # If the database can't use chunked reads we need to make sure we # evaluate the entire query up front. 
            result = list(self.cursor)
        else:
            result = self.cursor
        return iter(result)

    def __repr__(self):
        return "<%s: %s>" % (self.__class__.__name__, self)

    @property
    def params_type(self):
        return dict if isinstance(self.params, Mapping) else tuple

    def __str__(self):
        return self.sql % self.params_type(self.params)

    def _execute_query(self):
        connection = connections[self.using]

        # Adapt parameters to the database, as much as possible considering
        # that the target type isn't known. See #17755.
        params_type = self.params_type
        adapter = connection.ops.adapt_unknown_value
        if params_type is tuple:
            params = tuple(adapter(val) for val in self.params)
        elif params_type is dict:
            params = {key: adapter(val) for key, val in self.params.items()}
        else:
            raise RuntimeError("Unexpected params type: %s" % params_type)

        self.cursor = connection.cursor()
        self.cursor.execute(self.sql, params)


class Query(BaseExpression):
    """A single SQL query."""

    alias_prefix = 'T'
    subq_aliases = frozenset([alias_prefix])

    compiler = 'SQLCompiler'

    def __init__(self, model, where=WhereNode):
        self.model = model
        self.alias_refcount = {}
        # alias_map is the most important data structure regarding joins.
        # It's used for recording which joins exist in the query and what
        # types they are. The key is the alias of the joined table (possibly
        # the table name) and the value is a Join-like object (see
        # sql.datastructures.Join for more information).
        self.alias_map = {}
        # Sometimes the query contains references to aliases in outer queries (as
        # a result of split_exclude). Correct alias quoting needs to know these
        # aliases too.
        self.external_aliases = set()
        self.table_map = {}     # Maps table names to list of aliases.
        self.default_cols = True
        self.default_ordering = True
        self.standard_ordering = True
        self.used_aliases = set()
        self.filter_is_sticky = False
        self.subquery = False

        # SQL-related attributes
        # Select and related select clauses are expressions to use in the
        # SELECT clause of the query.
        # The select is used for cases where we want to set up the select
        # clause to contain other than default fields (values(), subqueries...)
        # Note that annotations go to annotations dictionary.
        self.select = ()
        self.where = where()
        self.where_class = where
        # The group_by attribute can have one of the following forms:
        #  - None: no group by at all in the query
        #  - A tuple of expressions: group by (at least) those expressions.
        #    String refs are also allowed for now.
        #  - True: group by all select fields of the model
        # See compiler.get_group_by() for details.
        self.group_by = None
        self.order_by = ()
        self.low_mark, self.high_mark = 0, None  # Used for offset/limit
        self.distinct = False
        self.distinct_fields = ()
        self.select_for_update = False
        self.select_for_update_nowait = False
        self.select_for_update_skip_locked = False
        self.select_for_update_of = ()

        self.select_related = False
        # Arbitrary limit for select_related to prevent infinite recursion.
        self.max_depth = 5

        # Holds the selects defined by a call to values() or values_list()
        # excluding annotation_select and extra_select.
        self.values_select = ()

        # SQL annotation-related attributes
        self.annotations = {}  # Maps alias -> Annotation Expression
        self.annotation_select_mask = None
        self._annotation_select_cache = None

        # Set combination attributes
        self.combinator = None
        self.combinator_all = False
        self.combined_queries = ()

        # These are for extensions. The contents are more or less appended
        # verbatim to the appropriate clause.
        self.extra = {}  # Maps col_alias -> (col_sql, params).
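        # For example (illustrative), .extra(select={'is_recent': 'pub_date > 2010'})
        # ends up stored here as {'is_recent': ('pub_date > 2010', [])}.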
        self.extra_select_mask = None
        self._extra_select_cache = None

        self.extra_tables = ()
        self.extra_order_by = ()

        # A tuple that is a set of model field names and either True, if these
        # are the fields to defer, or False if these are the only fields to
        # load.
        self.deferred_loading = (frozenset(), True)

        self._filtered_relations = {}

        self.explain_query = False
        self.explain_format = None
        self.explain_options = {}

    @property
    def output_field(self):
        if len(self.select) == 1:
            return self.select[0].field
        elif len(self.annotation_select) == 1:
            return next(iter(self.annotation_select.values())).output_field

    @property
    def has_select_fields(self):
        return bool(self.select or self.annotation_select_mask or self.extra_select_mask)

    @cached_property
    def base_table(self):
        for alias in self.alias_map:
            return alias

    def __str__(self):
        """
        Return the query as a string of SQL with the parameter values
        substituted in (use sql_with_params() to see the unsubstituted string).

        Parameter values won't necessarily be quoted correctly, since that is
        done by the database interface at execution time.
        """
        sql, params = self.sql_with_params()
        return sql % params

    def sql_with_params(self):
        """
        Return the query as an SQL string and the parameters that will be
        substituted into the query.
        """
        return self.get_compiler(DEFAULT_DB_ALIAS).as_sql()

    def __deepcopy__(self, memo):
        """Limit the amount of work when a Query is deepcopied."""
        result = self.clone()
        memo[id(self)] = result
        return result

    def get_compiler(self, using=None, connection=None):
        if using is None and connection is None:
            raise ValueError("Need either using or connection")
        if using:
            connection = connections[using]
        return connection.ops.compiler(self.compiler)(self, connection, using)

    def get_meta(self):
        """
        Return the Options instance (the model._meta) from which to start
        processing. Normally, this is self.model._meta, but it can be changed
        by subclasses.
        """
        return self.model._meta

    def clone(self):
        """
        Return a copy of the current Query. A lightweight alternative to
        deepcopy().
        """
        obj = Empty()
        obj.__class__ = self.__class__
        # Copy references to everything.
        obj.__dict__ = self.__dict__.copy()
        # Clone attributes that can't use shallow copy.
        obj.alias_refcount = self.alias_refcount.copy()
        obj.alias_map = self.alias_map.copy()
        obj.external_aliases = self.external_aliases.copy()
        obj.table_map = self.table_map.copy()
        obj.where = self.where.clone()
        obj.annotations = self.annotations.copy()
        if self.annotation_select_mask is None:
            obj.annotation_select_mask = None
        else:
            obj.annotation_select_mask = self.annotation_select_mask.copy()
        # _annotation_select_cache cannot be copied, as doing so breaks the
        # (necessary) state in which both annotations and
        # _annotation_select_cache point to the same underlying objects.
        # It will get re-populated in the cloned queryset the next time it's
        # used.
        obj._annotation_select_cache = None
        obj.extra = self.extra.copy()
        if self.extra_select_mask is None:
            obj.extra_select_mask = None
        else:
            obj.extra_select_mask = self.extra_select_mask.copy()
        if self._extra_select_cache is None:
            obj._extra_select_cache = None
        else:
            obj._extra_select_cache = self._extra_select_cache.copy()
        if 'subq_aliases' in self.__dict__:
            obj.subq_aliases = self.subq_aliases.copy()
        obj.used_aliases = self.used_aliases.copy()
        obj._filtered_relations = self._filtered_relations.copy()
        # Clear the cached_property
        try:
            del obj.base_table
        except AttributeError:
            pass
        return obj

    def chain(self, klass=None):
        """
        Return a copy of the current Query that's ready for another operation.
        The klass argument changes the type of the Query, e.g. UpdateQuery.
        """
        obj = self.clone()
        if klass and obj.__class__ != klass:
            obj.__class__ = klass
        if not obj.filter_is_sticky:
            obj.used_aliases = set()
        obj.filter_is_sticky = False
        if hasattr(obj, '_setup_query'):
            obj._setup_query()
        return obj

    def relabeled_clone(self, change_map):
        clone = self.clone()
        clone.change_aliases(change_map)
        return clone

    def rewrite_cols(self, annotation, col_cnt):
        # We must make sure the inner query has the referred columns in it.
        # If we are aggregating over an annotation, then Django uses Ref()
        # instances to note this. However, if we are annotating over a column
        # of a related model, then it might be that column isn't part of the
        # SELECT clause of the inner query, and we must manually make sure
        # the column is selected. An example case is:
        #    .aggregate(Sum('author__awards'))
        # Resolving this expression results in a join to author, but there
        # is no guarantee the awards column of author is in the select clause
        # of the query. Thus we must manually add the column to the inner
        # query.
        orig_exprs = annotation.get_source_expressions()
        new_exprs = []
        for expr in orig_exprs:
            # FIXME: These conditions are fairly arbitrary. Identify a better
            # method of having expressions decide which code path they should
            # take.
            if isinstance(expr, Ref):
                # It's already a Ref to subquery (see resolve_ref() for
                # details)
                new_exprs.append(expr)
            elif isinstance(expr, (WhereNode, Lookup)):
                # Decompose the subexpressions further. The code here is
                # copied from the else clause, but this condition must appear
                # before the contains_aggregate/is_summary condition below.
                new_expr, col_cnt = self.rewrite_cols(expr, col_cnt)
                new_exprs.append(new_expr)
            else:
                # Reuse aliases of expressions already selected in subquery.
                for col_alias, selected_annotation in self.annotation_select.items():
                    if selected_annotation == expr:
                        new_expr = Ref(col_alias, expr)
                        break
                else:
                    # An expression that is not selected in the subquery.
                    if isinstance(expr, Col) or (expr.contains_aggregate and not expr.is_summary):
                        # Reference column or another aggregate. Select it
                        # under a non-conflicting alias.
                        col_cnt += 1
                        col_alias = '__col%d' % col_cnt
                        self.annotations[col_alias] = expr
                        self.append_annotation_mask([col_alias])
                        new_expr = Ref(col_alias, expr)
                    else:
                        # Some other expression not referencing database values
                        # directly. Its subexpression might contain Cols.
                        new_expr, col_cnt = self.rewrite_cols(expr, col_cnt)
                new_exprs.append(new_expr)
        annotation.set_source_expressions(new_exprs)
        return annotation, col_cnt

    def get_aggregation(self, using, added_aggregate_names):
        """
        Return the dictionary with the values of the existing aggregations.
        """
        if not self.annotation_select:
            return {}
        existing_annotations = [
            annotation for alias, annotation
            in self.annotations.items()
            if alias not in added_aggregate_names
        ]
        # Decide if we need to use a subquery.
        #
        # Existing annotations would cause incorrect results as get_aggregation()
        # must produce just one result and thus must not use GROUP BY. But we
        # aren't smart enough to remove the existing annotations from the
        # query, so those would force us to use GROUP BY.
        #
        # If the query has limit or distinct, or uses set operations, then
        # those operations must be done in a subquery so that the query
        # aggregates on the limit and/or distinct results instead of applying
        # the distinct and limit after the aggregation.
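        # For example (illustrative), Model.objects[0:3].count() must count at
        # most three rows, so the sliced query is wrapped in a subquery below.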
        if (isinstance(self.group_by, tuple) or self.is_sliced or existing_annotations or
                self.distinct or self.combinator):
            from django.db.models.sql.subqueries import AggregateQuery
            outer_query = AggregateQuery(self.model)
            inner_query = self.clone()
            inner_query.select_for_update = False
            inner_query.select_related = False
            inner_query.set_annotation_mask(self.annotation_select)
            if not self.is_sliced and not self.distinct_fields:
                # Queries with distinct_fields need ordering and when a limit
                # is applied we must take the slice from the ordered query.
                # Otherwise no need for ordering.
                inner_query.clear_ordering(True)
            if not inner_query.distinct:
                # If the inner query uses default select and it has some
                # aggregate annotations, then we must make sure the inner
                # query is grouped by the main model's primary key. However,
                # clearing the select clause can alter results if distinct is
                # used.
                has_existing_aggregate_annotations = any(
                    annotation for annotation in existing_annotations
                    if getattr(annotation, 'contains_aggregate', True)
                )
                if inner_query.default_cols and has_existing_aggregate_annotations:
                    inner_query.group_by = (self.model._meta.pk.get_col(inner_query.get_initial_alias()),)
                inner_query.default_cols = False

            relabels = {t: 'subquery' for t in inner_query.alias_map}
            relabels[None] = 'subquery'
            # Remove any aggregates marked for reduction from the subquery
            # and move them to the outer AggregateQuery.
            col_cnt = 0
            for alias, expression in list(inner_query.annotation_select.items()):
                annotation_select_mask = inner_query.annotation_select_mask
                if expression.is_summary:
                    expression, col_cnt = inner_query.rewrite_cols(expression, col_cnt)
                    outer_query.annotations[alias] = expression.relabeled_clone(relabels)
                    del inner_query.annotations[alias]
                    annotation_select_mask.remove(alias)
                # Make sure the annotation_select won't use cached results.
                inner_query.set_annotation_mask(inner_query.annotation_select_mask)
            if inner_query.select == () and not inner_query.default_cols and not inner_query.annotation_select_mask:
                # In case of Model.objects[0:3].count(), there would be no
                # field selected in the inner query, yet we must use a subquery.
                # So, make sure at least one field is selected.
                inner_query.select = (self.model._meta.pk.get_col(inner_query.get_initial_alias()),)
            try:
                outer_query.add_subquery(inner_query, using)
            except EmptyResultSet:
                return {
                    alias: None
                    for alias in outer_query.annotation_select
                }
        else:
            outer_query = self
            self.select = ()
            self.default_cols = False
            self.extra = {}

        outer_query.clear_ordering(True)
        outer_query.clear_limits()
        outer_query.select_for_update = False
        outer_query.select_related = False
        compiler = outer_query.get_compiler(using)
        result = compiler.execute_sql(SINGLE)
        if result is None:
            result = [None] * len(outer_query.annotation_select)

        converters = compiler.get_converters(outer_query.annotation_select.values())
        result = next(compiler.apply_converters((result,), converters))

        return dict(zip(outer_query.annotation_select, result))

    def get_count(self, using):
        """
        Perform a COUNT() query using the current filter constraints.
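        For example (illustrative), Entry.objects.filter(public=True).count()
        funnels through here: a Count('*') annotation is added under the
        '__count' alias and its aggregated value is returned.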
""" obj = self.clone() obj.add_annotation(Count('*'), alias='__count', is_summary=True) number = obj.get_aggregation(using, ['__count'])['__count'] if number is None: number = 0 return number def has_filters(self): return self.where def has_results(self, using): q = self.clone() if not q.distinct: if q.group_by is True: q.add_fields((f.attname for f in self.model._meta.concrete_fields), False) q.set_group_by() q.clear_select_clause() q.clear_ordering(True) q.set_limits(high=1) compiler = q.get_compiler(using=using) return compiler.has_results() def explain(self, using, format=None, **options): q = self.clone() q.explain_query = True q.explain_format = format q.explain_options = options compiler = q.get_compiler(using=using) return '\n'.join(compiler.explain_query()) def combine(self, rhs, connector): """ Merge the 'rhs' query into the current one (with any 'rhs' effects being applied *after* (that is, "to the right of") anything in the current query. 'rhs' is not modified during a call to this function. The 'connector' parameter describes how to connect filters from the 'rhs' query. """ assert self.model == rhs.model, \ "Cannot combine queries on two different base models." assert not self.is_sliced, \ "Cannot combine queries once a slice has been taken." assert self.distinct == rhs.distinct, \ "Cannot combine a unique query with a non-unique query." assert self.distinct_fields == rhs.distinct_fields, \ "Cannot combine queries with different distinct fields." # Work out how to relabel the rhs aliases, if necessary. change_map = {} conjunction = (connector == AND) # Determine which existing joins can be reused. When combining the # query with AND we must recreate all joins for m2m filters. When # combining with OR we can reuse joins. The reason is that in AND # case a single row can't fulfill a condition like: # revrel__col=1 & revrel__col=2 # But, there might be two different related rows matching this # condition. In OR case a single True is enough, so single row is # enough, too. # # Note that we will be creating duplicate joins for non-m2m joins in # the AND case. The results will be correct but this creates too many # joins. This is something that could be fixed later on. reuse = set() if conjunction else set(self.alias_map) # Base table must be present in the query - this is the same # table on both sides. self.get_initial_alias() joinpromoter = JoinPromoter(connector, 2, False) joinpromoter.add_votes( j for j in self.alias_map if self.alias_map[j].join_type == INNER) rhs_votes = set() # Now, add the joins from rhs query into the new query (skipping base # table). rhs_tables = list(rhs.alias_map)[1:] for alias in rhs_tables: join = rhs.alias_map[alias] # If the left side of the join was already relabeled, use the # updated alias. join = join.relabeled_clone(change_map) new_alias = self.join(join, reuse=reuse) if join.join_type == INNER: rhs_votes.add(new_alias) # We can't reuse the same join again in the query. If we have two # distinct joins for the same connection in rhs query, then the # combined query must have two joins, too. reuse.discard(new_alias) if alias != new_alias: change_map[alias] = new_alias if not rhs.alias_refcount[alias]: # The alias was unused in the rhs query. Unref it so that it # will be unused in the new query, too. We have to add and # unref the alias so that join promotion has information of # the join type for the unused alias. 
                self.unref_alias(new_alias)
        joinpromoter.add_votes(rhs_votes)
        joinpromoter.update_join_types(self)

        # Now relabel a copy of the rhs where-clause and add it to the current
        # one.
        w = rhs.where.clone()
        w.relabel_aliases(change_map)
        self.where.add(w, connector)

        # Selection columns and extra extensions are those provided by 'rhs'.
        if rhs.select:
            self.set_select([col.relabeled_clone(change_map) for col in rhs.select])
        else:
            self.select = ()

        if connector == OR:
            # It would be nice to be able to handle this, but the queries don't
            # really make sense (or return consistent value sets). Not worth
            # the extra complexity when you can write a real query instead.
            if self.extra and rhs.extra:
                raise ValueError("When merging querysets using 'or', you cannot have extra(select=...) on both sides.")
        self.extra.update(rhs.extra)
        extra_select_mask = set()
        if self.extra_select_mask is not None:
            extra_select_mask.update(self.extra_select_mask)
        if rhs.extra_select_mask is not None:
            extra_select_mask.update(rhs.extra_select_mask)
        if extra_select_mask:
            self.set_extra_mask(extra_select_mask)
        self.extra_tables += rhs.extra_tables

        # Ordering uses the 'rhs' ordering, unless it has none, in which case
        # the current ordering is used.
        self.order_by = rhs.order_by or self.order_by
        self.extra_order_by = rhs.extra_order_by or self.extra_order_by

    def deferred_to_data(self, target, callback):
        """
        Convert the self.deferred_loading data structure to an alternate data
        structure, describing the fields that *will* be loaded. This is used
        to compute the columns to select from the database and also by the
        QuerySet class to work out which fields are being initialized on each
        model. Models that have all their fields included aren't mentioned in
        the result, only those that have field restrictions in place.

        The "target" parameter is the instance that is populated (in place).
        The "callback" is a function that is called whenever a (model, field)
        pair needs to be added to "target". It accepts three parameters:
        "target", and the model and list of fields being added for that model.
        """
        field_names, defer = self.deferred_loading
        if not field_names:
            return
        orig_opts = self.get_meta()
        seen = {}
        must_include = {orig_opts.concrete_model: {orig_opts.pk}}
        for field_name in field_names:
            parts = field_name.split(LOOKUP_SEP)
            cur_model = self.model._meta.concrete_model
            opts = orig_opts
            for name in parts[:-1]:
                old_model = cur_model
                if name in self._filtered_relations:
                    name = self._filtered_relations[name].relation_name
                source = opts.get_field(name)
                if is_reverse_o2o(source):
                    cur_model = source.related_model
                else:
                    cur_model = source.remote_field.model
                opts = cur_model._meta
                # Even if we're "just passing through" this model, we must add
                # both the current model's pk and the related reference field
                # (if it's not a reverse relation) to the things we select.
                if not is_reverse_o2o(source):
                    must_include[old_model].add(source)
                add_to_dict(must_include, cur_model, opts.pk)
            field = opts.get_field(parts[-1])
            is_reverse_object = field.auto_created and not field.concrete
            model = field.related_model if is_reverse_object else field.model
            model = model._meta.concrete_model
            if model == opts.model:
                model = cur_model
            if not is_reverse_o2o(field):
                add_to_dict(seen, model, field)

        if defer:
            # We need to load all fields for each model, except those that
            # appear in "seen" (for all models that appear in "seen"). The only
            # slight complexity here is handling fields that exist on parent
            # models.
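            # For example (illustrative), deferring 'headline' on a child
            # model that inherits the field means the entry is keyed by the
            # concrete parent model that actually defines the field.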
            workset = {}
            for model, values in seen.items():
                for field in model._meta.local_fields:
                    if field not in values:
                        m = field.model._meta.concrete_model
                        add_to_dict(workset, m, field)
            for model, values in must_include.items():
                # If we haven't included a model in workset, we don't add the
                # corresponding must_include fields for that model, since an
                # empty set means "include all fields". That's why there's no
                # "else" branch here.
                if model in workset:
                    workset[model].update(values)
            for model, values in workset.items():
                callback(target, model, values)
        else:
            for model, values in must_include.items():
                if model in seen:
                    seen[model].update(values)
                else:
                    # As we've passed through this model, but not explicitly
                    # included any fields, we have to make sure it's mentioned
                    # so that only the "must include" fields are pulled in.
                    seen[model] = values
            # Now ensure that every model in the inheritance chain is mentioned
            # in the parent list. Again, it must be mentioned to ensure that
            # only "must include" fields are pulled in.
            for model in orig_opts.get_parent_list():
                seen.setdefault(model, set())
            for model, values in seen.items():
                callback(target, model, values)

    def table_alias(self, table_name, create=False, filtered_relation=None):
        """
        Return a table alias for the given table_name and whether this is a
        new alias or not.

        If 'create' is true, a new alias is always created. Otherwise, the
        most recently created alias for the table (if one exists) is reused.
        """
        alias_list = self.table_map.get(table_name)
        if not create and alias_list:
            alias = alias_list[0]
            self.alias_refcount[alias] += 1
            return alias, False

        # Create a new alias for this table.
        if alias_list:
            alias = '%s%d' % (self.alias_prefix, len(self.alias_map) + 1)
            alias_list.append(alias)
        else:
            # The first occurrence of a table uses the table name directly.
            alias = filtered_relation.alias if filtered_relation is not None else table_name
            self.table_map[table_name] = [alias]
        self.alias_refcount[alias] = 1
        return alias, True

    def ref_alias(self, alias):
        """Increase the reference count for this alias."""
        self.alias_refcount[alias] += 1

    def unref_alias(self, alias, amount=1):
        """Decrease the reference count for this alias."""
        self.alias_refcount[alias] -= amount

    def promote_joins(self, aliases):
        """
        Promote recursively the join type of given aliases and their children
        to an outer join. Only promote a join if it is nullable or the parent
        join is an outer join.

        The children promotion is done to avoid join chains that contain a
        LOUTER b INNER c. So, if we have currently a INNER b INNER c and a->b
        is promoted, then we must also promote b->c automatically, or
        otherwise the promotion of a->b doesn't actually change anything in
        the query results.
        """
        aliases = list(aliases)
        while aliases:
            alias = aliases.pop(0)
            if self.alias_map[alias].join_type is None:
                # This is the base table (first FROM entry) - this table
                # isn't really joined at all in the query, so we should not
                # alter its join type.
                continue
            # Only the first alias (skipped above) should have None join_type
            assert self.alias_map[alias].join_type is not None
            parent_alias = self.alias_map[alias].parent_alias
            parent_louter = parent_alias and self.alias_map[parent_alias].join_type == LOUTER
            already_louter = self.alias_map[alias].join_type == LOUTER
            if ((self.alias_map[alias].nullable or parent_louter) and
                    not already_louter):
                self.alias_map[alias] = self.alias_map[alias].promote()
                # Join type of 'alias' changed, so re-examine all aliases that
                # refer to this one.
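                # Editor's note: e.g. in a chain a INNER b INNER c, promoting
                # a->b here re-queues b->c below so the result is
                # a LOUTER b LOUTER c rather than the invalid a LOUTER b INNER c.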
aliases.extend( join for join in self.alias_map if self.alias_map[join].parent_alias == alias and join not in aliases ) def demote_joins(self, aliases): """ Change join type from LOUTER to INNER for all joins in aliases. Similarly to promote_joins(), this method must ensure no join chains containing first an outer, then an inner join are generated. If we are demoting b->c join in chain a LOUTER b LOUTER c then we must demote a->b automatically, or otherwise the demotion of b->c doesn't actually change anything in the query results. . """ aliases = list(aliases) while aliases: alias = aliases.pop(0) if self.alias_map[alias].join_type == LOUTER: self.alias_map[alias] = self.alias_map[alias].demote() parent_alias = self.alias_map[alias].parent_alias if self.alias_map[parent_alias].join_type == INNER: aliases.append(parent_alias) def reset_refcounts(self, to_counts): """ Reset reference counts for aliases so that they match the value passed in `to_counts`. """ for alias, cur_refcount in self.alias_refcount.copy().items(): unref_amount = cur_refcount - to_counts.get(alias, 0) self.unref_alias(alias, unref_amount) def change_aliases(self, change_map): """ Change the aliases in change_map (which maps old-alias -> new-alias), relabelling any references to them in select columns and the where clause. """ assert set(change_map).isdisjoint(change_map.values()) # 1. Update references in "select" (normal columns plus aliases), # "group by" and "where". self.where.relabel_aliases(change_map) if isinstance(self.group_by, tuple): self.group_by = tuple([col.relabeled_clone(change_map) for col in self.group_by]) self.select = tuple([col.relabeled_clone(change_map) for col in self.select]) self.annotations = self.annotations and { key: col.relabeled_clone(change_map) for key, col in self.annotations.items() } # 2. Rename the alias in the internal table/alias datastructures. for old_alias, new_alias in change_map.items(): if old_alias not in self.alias_map: continue alias_data = self.alias_map[old_alias].relabeled_clone(change_map) self.alias_map[new_alias] = alias_data self.alias_refcount[new_alias] = self.alias_refcount[old_alias] del self.alias_refcount[old_alias] del self.alias_map[old_alias] table_aliases = self.table_map[alias_data.table_name] for pos, alias in enumerate(table_aliases): if alias == old_alias: table_aliases[pos] = new_alias break self.external_aliases = {change_map.get(alias, alias) for alias in self.external_aliases} def bump_prefix(self, outer_query): """ Change the alias prefix to the next letter in the alphabet in a way that the outer query's aliases and this query's aliases will not conflict. Even tables that previously had no alias will get an alias after this call. """ def prefix_gen(): """ Generate a sequence of characters in alphabetical order: -> 'A', 'B', 'C', ... When the alphabet is finished, the sequence will continue with the Cartesian product: -> 'AA', 'AB', 'AC', ... """ alphabet = ascii_uppercase prefix = chr(ord(self.alias_prefix) + 1) yield prefix for n in count(1): seq = alphabet[alphabet.index(prefix):] if prefix else alphabet for s in product(seq, repeat=n): yield ''.join(s) prefix = None if self.alias_prefix != outer_query.alias_prefix: # No clashes between self and outer query should be possible. return # Explicitly avoid infinite loop. The constant divider is based on how # much depth recursive subquery references add to the stack. 
This value # might need to be adjusted when adding or removing function calls from # the code path in charge of performing these operations. local_recursion_limit = sys.getrecursionlimit() // 16 for pos, prefix in enumerate(prefix_gen()): if prefix not in self.subq_aliases: self.alias_prefix = prefix break if pos > local_recursion_limit: raise RecursionError( 'Maximum recursion depth exceeded: too many subqueries.' ) self.subq_aliases = self.subq_aliases.union([self.alias_prefix]) outer_query.subq_aliases = outer_query.subq_aliases.union(self.subq_aliases) self.change_aliases({ alias: '%s%d' % (self.alias_prefix, pos) for pos, alias in enumerate(self.alias_map) }) def get_initial_alias(self): """ Return the first alias for this query, after increasing its reference count. """ if self.alias_map: alias = self.base_table self.ref_alias(alias) else: alias = self.join(BaseTable(self.get_meta().db_table, None)) return alias def count_active_tables(self): """ Return the number of tables in this query with a non-zero reference count. After execution, the reference counts are zeroed, so tables added in compiler will not be seen by this method. """ return len([1 for count in self.alias_refcount.values() if count]) def join(self, join, reuse=None, reuse_with_filtered_relation=False): """ Return an alias for the 'join', either reusing an existing alias for that join or creating a new one. 'join' is either a sql.datastructures.BaseTable or Join. The 'reuse' parameter can be either None which means all joins are reusable, or it can be a set containing the aliases that can be reused. The 'reuse_with_filtered_relation' parameter is used when computing FilteredRelation instances. A join is always created as LOUTER if the lhs alias is LOUTER to make sure chains like t1 LOUTER t2 INNER t3 aren't generated. All new joins are created as LOUTER if the join is nullable. """ if reuse_with_filtered_relation and reuse: reuse_aliases = [ a for a, j in self.alias_map.items() if a in reuse and j.equals(join, with_filtered_relation=False) ] else: reuse_aliases = [ a for a, j in self.alias_map.items() if (reuse is None or a in reuse) and j == join ] if reuse_aliases: if join.table_alias in reuse_aliases: reuse_alias = join.table_alias else: # Reuse the most recent alias of the joined table # (a many-to-many relation may be joined multiple times). reuse_alias = reuse_aliases[-1] self.ref_alias(reuse_alias) return reuse_alias # No reuse is possible, so we need a new alias. alias, _ = self.table_alias(join.table_name, create=True, filtered_relation=join.filtered_relation) if join.join_type: if self.alias_map[join.parent_alias].join_type == LOUTER or join.nullable: join_type = LOUTER else: join_type = INNER join.join_type = join_type join.table_alias = alias self.alias_map[alias] = join return alias def join_parent_model(self, opts, model, alias, seen): """ Make sure the given 'model' is joined in the query. If 'model' isn't a parent of 'opts' or if it is None this method is a no-op. The 'alias' is the root alias for starting the join, 'seen' is a dict of model -> alias of existing joins. It must also contain a mapping of None -> some alias. This will be returned in the no-op case. 
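        Illustration (editor's note, hypothetical models): if Restaurant
        inherits from Place, walking from the restaurant alias to Place adds
        the parent-link join and returns the alias of the place table.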
""" if model in seen: return seen[model] chain = opts.get_base_chain(model) if not chain: return alias curr_opts = opts for int_model in chain: if int_model in seen: curr_opts = int_model._meta alias = seen[int_model] continue # Proxy model have elements in base chain # with no parents, assign the new options # object and skip to the next base in that # case if not curr_opts.parents[int_model]: curr_opts = int_model._meta continue link_field = curr_opts.get_ancestor_link(int_model) join_info = self.setup_joins([link_field.name], curr_opts, alias) curr_opts = int_model._meta alias = seen[int_model] = join_info.joins[-1] return alias or seen[None] def add_annotation(self, annotation, alias, is_summary=False): """Add a single annotation expression to the Query.""" annotation = annotation.resolve_expression(self, allow_joins=True, reuse=None, summarize=is_summary) self.append_annotation_mask([alias]) self.annotations[alias] = annotation def resolve_expression(self, query, *args, **kwargs): clone = self.clone() # Subqueries need to use a different set of aliases than the outer query. clone.bump_prefix(query) clone.subquery = True # It's safe to drop ordering if the queryset isn't using slicing, # distinct(*fields) or select_for_update(). if (self.low_mark == 0 and self.high_mark is None and not self.distinct_fields and not self.select_for_update): clone.clear_ordering(True) clone.where.resolve_expression(query, *args, **kwargs) for key, value in clone.annotations.items(): resolved = value.resolve_expression(query, *args, **kwargs) if hasattr(resolved, 'external_aliases'): resolved.external_aliases.update(clone.alias_map) clone.annotations[key] = resolved # Outer query's aliases are considered external. clone.external_aliases.update( alias for alias, table in query.alias_map.items() if ( isinstance(table, Join) and table.join_field.related_model._meta.db_table != alias ) or ( isinstance(table, BaseTable) and table.table_name != table.table_alias ) ) return clone def as_sql(self, compiler, connection): sql, params = self.get_compiler(connection=connection).as_sql() if self.subquery: sql = '(%s)' % sql return sql, params def resolve_lookup_value(self, value, can_reuse, allow_joins, simple_col): if hasattr(value, 'resolve_expression'): kwargs = {'reuse': can_reuse, 'allow_joins': allow_joins} if isinstance(value, F): kwargs['simple_col'] = simple_col value = value.resolve_expression(self, **kwargs) elif isinstance(value, (list, tuple)): # The items of the iterable may be expressions and therefore need # to be resolved independently. for sub_value in value: if hasattr(sub_value, 'resolve_expression'): if isinstance(sub_value, F): sub_value.resolve_expression( self, reuse=can_reuse, allow_joins=allow_joins, simple_col=simple_col, ) else: sub_value.resolve_expression(self, reuse=can_reuse, allow_joins=allow_joins) return value def solve_lookup_type(self, lookup): """ Solve the lookup type from the lookup (e.g.: 'foobar__id__icontains'). """ lookup_splitted = lookup.split(LOOKUP_SEP) if self.annotations: expression, expression_lookups = refs_expression(lookup_splitted, self.annotations) if expression: return expression_lookups, (), expression _, field, _, lookup_parts = self.names_to_path(lookup_splitted, self.get_meta()) field_parts = lookup_splitted[0:len(lookup_splitted) - len(lookup_parts)] if len(lookup_parts) > 1 and not field_parts: raise FieldError( 'Invalid lookup "%s" for model %s".' 
                % (lookup, self.get_meta().model.__name__)
            )
        return lookup_parts, field_parts, False

    def check_query_object_type(self, value, opts, field):
        """
        Check whether the object passed while querying is of the correct type.
        If not, raise a ValueError specifying the wrong object.
        """
        if hasattr(value, '_meta'):
            if not check_rel_lookup_compatibility(value._meta.model, opts, field):
                raise ValueError(
                    'Cannot query "%s": Must be "%s" instance.' %
                    (value, opts.object_name))

    def check_related_objects(self, field, value, opts):
        """Check the type of object passed to query relations."""
        if field.is_relation:
            # Check that the field and the queryset use the same model in a
            # query like .filter(author=Author.objects.all()). For example, the
            # opts would be Author's (from the author field) and value.model
            # would be Author.objects.all() queryset's .model (Author also).
            # The field is the related field on the lhs side.
            if (isinstance(value, Query) and not value.has_select_fields and
                    not check_rel_lookup_compatibility(value.model, opts, field)):
                raise ValueError(
                    'Cannot use QuerySet for "%s": Use a QuerySet for "%s".' %
                    (value.model._meta.object_name, opts.object_name)
                )
            elif hasattr(value, '_meta'):
                self.check_query_object_type(value, opts, field)
            elif hasattr(value, '__iter__'):
                for v in value:
                    self.check_query_object_type(v, opts, field)

    def check_filterable(self, expression):
        """Raise an error if expression cannot be used in a WHERE clause."""
        # The default must be the boolean True, not the string 'True': any
        # non-empty string is truthy, which would silently disable this check
        # for expressions that don't define a `filterable` attribute.
        if not getattr(expression, 'filterable', True):
            raise NotSupportedError(
                expression.__class__.__name__ + ' is disallowed in the filter '
                'clause.'
            )
        if hasattr(expression, 'get_source_expressions'):
            for expr in expression.get_source_expressions():
                self.check_filterable(expr)

    def build_lookup(self, lookups, lhs, rhs):
        """
        Try to extract transforms and lookup from given lhs.

        The lhs value is something that works like SQLExpression.
        The rhs value is what the lookup is going to compare against.
        The lookups is a list of names to extract using get_lookup()
        and get_transform().
        """
        # __exact is the default lookup if one isn't given.
        *transforms, lookup_name = lookups or ['exact']
        for name in transforms:
            lhs = self.try_transform(lhs, name)
        # First try get_lookup() so that the lookup takes precedence if the lhs
        # supports both transform and lookup for the name.
        lookup_class = lhs.get_lookup(lookup_name)
        if not lookup_class:
            if lhs.field.is_relation:
                raise FieldError('Related Field got invalid lookup: {}'.format(lookup_name))
            # A lookup wasn't found. Try to interpret the name as a transform
            # and do an Exact lookup against it.
            lhs = self.try_transform(lhs, lookup_name)
            lookup_name = 'exact'
            lookup_class = lhs.get_lookup(lookup_name)
            if not lookup_class:
                return

        lookup = lookup_class(lhs, rhs)
        # Interpret '__exact=None' as the sql 'is NULL'; otherwise, reject all
        # uses of None as a query value unless the lookup supports it.
        if lookup.rhs is None and not lookup.can_use_none_as_rhs:
            if lookup_name not in ('exact', 'iexact'):
                raise ValueError("Cannot use None as a query value")
            return lhs.get_lookup('isnull')(lhs, True)

        # For Oracle '' is equivalent to null. The check must be done at this
        # stage because join promotion can't be done in the compiler. Using
        # DEFAULT_DB_ALIAS isn't nice but it's the best that can be done here.
        # A similar thing is done in is_nullable(), too.
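        # Editor's note: e.g. .filter(name='') on Oracle must behave like
        # .filter(name__isnull=True); the branch below swaps in the isnull
        # lookup at build time, since it is too late to do so in the compiler.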
if (connections[DEFAULT_DB_ALIAS].features.interprets_empty_strings_as_nulls and lookup_name == 'exact' and lookup.rhs == ''): return lhs.get_lookup('isnull')(lhs, True) return lookup def try_transform(self, lhs, name): """ Helper method for build_lookup(). Try to fetch and initialize a transform for name parameter from lhs. """ transform_class = lhs.get_transform(name) if transform_class: return transform_class(lhs) else: output_field = lhs.output_field.__class__ suggested_lookups = difflib.get_close_matches(name, output_field.get_lookups()) if suggested_lookups: suggestion = ', perhaps you meant %s?' % ' or '.join(suggested_lookups) else: suggestion = '.' raise FieldError( "Unsupported lookup '%s' for %s or join on the field not " "permitted%s" % (name, output_field.__name__, suggestion) ) def build_filter(self, filter_expr, branch_negated=False, current_negated=False, can_reuse=None, allow_joins=True, split_subq=True, reuse_with_filtered_relation=False, simple_col=False): """ Build a WhereNode for a single filter clause but don't add it to this Query. Query.add_q() will then add this filter to the where Node. The 'branch_negated' tells us if the current branch contains any negations. This will be used to determine if subqueries are needed. The 'current_negated' is used to determine if the current filter is negated or not and this will be used to determine if IS NULL filtering is needed. The difference between current_negated and branch_negated is that branch_negated is set on first negation, but current_negated is flipped for each negation. Note that add_filter will not do any negating itself, that is done upper in the code by add_q(). The 'can_reuse' is a set of reusable joins for multijoins. If 'reuse_with_filtered_relation' is True, then only joins in can_reuse will be reused. The method will create a filter clause that can be added to the current query. However, if the filter isn't added to the query then the caller is responsible for unreffing the joins used. 
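        For example (editor's sketch, hypothetical model), calling
        build_filter(('author__name__iexact', 'bob')) sets up the author
        join and returns a WhereNode plus the join aliases it used, leaving
        self.where untouched.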
""" if isinstance(filter_expr, dict): raise FieldError("Cannot parse keyword query as dict") arg, value = filter_expr if not arg: raise FieldError("Cannot parse keyword query %r" % arg) lookups, parts, reffed_expression = self.solve_lookup_type(arg) self.check_filterable(reffed_expression) if not allow_joins and len(parts) > 1: raise FieldError("Joined field references are not permitted in this query") pre_joins = self.alias_refcount.copy() value = self.resolve_lookup_value(value, can_reuse, allow_joins, simple_col) used_joins = {k for k, v in self.alias_refcount.items() if v > pre_joins.get(k, 0)} self.check_filterable(value) clause = self.where_class() if reffed_expression: condition = self.build_lookup(lookups, reffed_expression, value) clause.add(condition, AND) return clause, [] opts = self.get_meta() alias = self.get_initial_alias() allow_many = not branch_negated or not split_subq try: join_info = self.setup_joins( parts, opts, alias, can_reuse=can_reuse, allow_many=allow_many, reuse_with_filtered_relation=reuse_with_filtered_relation, ) # Prevent iterator from being consumed by check_related_objects() if isinstance(value, Iterator): value = list(value) self.check_related_objects(join_info.final_field, value, join_info.opts) # split_exclude() needs to know which joins were generated for the # lookup parts self._lookup_joins = join_info.joins except MultiJoin as e: return self.split_exclude(filter_expr, can_reuse, e.names_with_path) # Update used_joins before trimming since they are reused to determine # which joins could be later promoted to INNER. used_joins.update(join_info.joins) targets, alias, join_list = self.trim_joins(join_info.targets, join_info.joins, join_info.path) if can_reuse is not None: can_reuse.update(join_list) if join_info.final_field.is_relation: # No support for transforms for relational fields num_lookups = len(lookups) if num_lookups > 1: raise FieldError('Related Field got invalid lookup: {}'.format(lookups[0])) if len(targets) == 1: col = _get_col(targets[0], join_info.final_field, alias, simple_col) else: col = MultiColSource(alias, targets, join_info.targets, join_info.final_field) else: col = _get_col(targets[0], join_info.final_field, alias, simple_col) condition = self.build_lookup(lookups, col, value) lookup_type = condition.lookup_name clause.add(condition, AND) require_outer = lookup_type == 'isnull' and condition.rhs is True and not current_negated if current_negated and (lookup_type != 'isnull' or condition.rhs is False) and condition.rhs is not None: require_outer = True if (lookup_type != 'isnull' and ( self.is_nullable(targets[0]) or self.alias_map[join_list[-1]].join_type == LOUTER)): # The condition added here will be SQL like this: # NOT (col IS NOT NULL), where the first NOT is added in # upper layers of code. The reason for addition is that if col # is null, then col != someval will result in SQL "unknown" # which isn't the same as in Python. The Python None handling # is wanted, and it can be gotten by # (col IS NULL OR col != someval) # <=> # NOT (col IS NOT NULL AND col = someval). lookup_class = targets[0].get_lookup('isnull') col = _get_col(targets[0], join_info.targets[0], alias, simple_col) clause.add(lookup_class(col, False), AND) return clause, used_joins if not require_outer else () def add_filter(self, filter_clause): self.add_q(Q(**{filter_clause[0]: filter_clause[1]})) def add_q(self, q_object): """ A preprocessor for the internal _add_q(). Responsible for doing final join promotion. 
""" # For join promotion this case is doing an AND for the added q_object # and existing conditions. So, any existing inner join forces the join # type to remain inner. Existing outer joins can however be demoted. # (Consider case where rel_a is LOUTER and rel_a__col=1 is added - if # rel_a doesn't produce any rows, then the whole condition must fail. # So, demotion is OK. existing_inner = {a for a in self.alias_map if self.alias_map[a].join_type == INNER} clause, _ = self._add_q(q_object, self.used_aliases) if clause: self.where.add(clause, AND) self.demote_joins(existing_inner) def build_where(self, q_object): return self._add_q(q_object, used_aliases=set(), allow_joins=False, simple_col=True)[0] def _add_q(self, q_object, used_aliases, branch_negated=False, current_negated=False, allow_joins=True, split_subq=True, simple_col=False): """Add a Q-object to the current filter.""" connector = q_object.connector current_negated = current_negated ^ q_object.negated branch_negated = branch_negated or q_object.negated target_clause = self.where_class(connector=connector, negated=q_object.negated) joinpromoter = JoinPromoter(q_object.connector, len(q_object.children), current_negated) for child in q_object.children: if isinstance(child, Node): child_clause, needed_inner = self._add_q( child, used_aliases, branch_negated, current_negated, allow_joins, split_subq, simple_col) joinpromoter.add_votes(needed_inner) else: child_clause, needed_inner = self.build_filter( child, can_reuse=used_aliases, branch_negated=branch_negated, current_negated=current_negated, allow_joins=allow_joins, split_subq=split_subq, simple_col=simple_col, ) joinpromoter.add_votes(needed_inner) if child_clause: target_clause.add(child_clause, connector) needed_inner = joinpromoter.update_join_types(self) return target_clause, needed_inner def build_filtered_relation_q(self, q_object, reuse, branch_negated=False, current_negated=False): """Add a FilteredRelation object to the current filter.""" connector = q_object.connector current_negated ^= q_object.negated branch_negated = branch_negated or q_object.negated target_clause = self.where_class(connector=connector, negated=q_object.negated) for child in q_object.children: if isinstance(child, Node): child_clause = self.build_filtered_relation_q( child, reuse=reuse, branch_negated=branch_negated, current_negated=current_negated, ) else: child_clause, _ = self.build_filter( child, can_reuse=reuse, branch_negated=branch_negated, current_negated=current_negated, allow_joins=True, split_subq=False, reuse_with_filtered_relation=True, ) target_clause.add(child_clause, connector) return target_clause def add_filtered_relation(self, filtered_relation, alias): filtered_relation.alias = alias lookups = dict(get_children_from_q(filtered_relation.condition)) for lookup in chain((filtered_relation.relation_name,), lookups): lookup_parts, field_parts, _ = self.solve_lookup_type(lookup) shift = 2 if not lookup_parts else 1 if len(field_parts) > (shift + len(lookup_parts)): raise ValueError( "FilteredRelation's condition doesn't support nested " "relations (got %r)." % lookup ) self._filtered_relations[filtered_relation.alias] = filtered_relation def names_to_path(self, names, opts, allow_many=True, fail_on_missing=False): """ Walk the list of names and turns them into PathInfo tuples. A single name in 'names' can generate multiple PathInfos (m2m, for example). 
'names' is the path of names to travel, 'opts' is the model Options we start the name resolving from, 'allow_many' is as for setup_joins(). If fail_on_missing is set to True, then a name that can't be resolved will generate a FieldError. Return a list of PathInfo tuples. In addition return the final field (the last used join field) and target (which is a field guaranteed to contain the same value as the final field). Finally, return those names that weren't found (which are likely transforms and the final lookup). """ path, names_with_path = [], [] for pos, name in enumerate(names): cur_names_with_path = (name, []) if name == 'pk': name = opts.pk.name field = None filtered_relation = None try: field = opts.get_field(name) except FieldDoesNotExist: if name in self.annotation_select: field = self.annotation_select[name].output_field elif name in self._filtered_relations and pos == 0: filtered_relation = self._filtered_relations[name] field = opts.get_field(filtered_relation.relation_name) if field is not None: # Fields that contain one-to-many relations with a generic # model (like a GenericForeignKey) cannot generate reverse # relations and therefore cannot be used for reverse querying. if field.is_relation and not field.related_model: raise FieldError( "Field %r does not generate an automatic reverse " "relation and therefore cannot be used for reverse " "querying. If it is a GenericForeignKey, consider " "adding a GenericRelation." % name ) try: model = field.model._meta.concrete_model except AttributeError: # QuerySet.annotate() may introduce fields that aren't # attached to a model. model = None else: # We didn't find the current field, so move position back # one step. pos -= 1 if pos == -1 or fail_on_missing: available = sorted([ *get_field_names_from_opts(opts), *self.annotation_select, *self._filtered_relations, ]) raise FieldError("Cannot resolve keyword '%s' into field. " "Choices are: %s" % (name, ", ".join(available))) break # Check if we need any joins for concrete inheritance cases (the # field lives in parent, but we are currently in one of its # children) if model is not opts.model: path_to_parent = opts.get_path_to_parent(model) if path_to_parent: path.extend(path_to_parent) cur_names_with_path[1].extend(path_to_parent) opts = path_to_parent[-1].to_opts if hasattr(field, 'get_path_info'): pathinfos = field.get_path_info(filtered_relation) if not allow_many: for inner_pos, p in enumerate(pathinfos): if p.m2m: cur_names_with_path[1].extend(pathinfos[0:inner_pos + 1]) names_with_path.append(cur_names_with_path) raise MultiJoin(pos + 1, names_with_path) last = pathinfos[-1] path.extend(pathinfos) final_field = last.join_field opts = last.to_opts targets = last.target_fields cur_names_with_path[1].extend(pathinfos) names_with_path.append(cur_names_with_path) else: # Local non-relational field. final_field = field targets = (field,) if fail_on_missing and pos + 1 != len(names): raise FieldError( "Cannot resolve keyword %r into field. Join on '%s'" " not permitted." % (names[pos + 1], name)) break return path, final_field, targets, names[pos + 1:] def setup_joins(self, names, opts, alias, can_reuse=None, allow_many=True, reuse_with_filtered_relation=False): """ Compute the necessary table joins for the passage through the fields given in 'names'. 'opts' is the Options class for the current model (which gives the table we are starting from), 'alias' is the alias for the table to start the joining from. The 'can_reuse' defines the reverse foreign key joins we can reuse. 
It can be None in which case all joins are reusable or a set of aliases that can be reused. Note that non-reverse foreign keys are always reusable when using setup_joins(). The 'reuse_with_filtered_relation' can be used to force 'can_reuse' parameter and force the relation on the given connections. If 'allow_many' is False, then any reverse foreign key seen will generate a MultiJoin exception. Return the final field involved in the joins, the target field (used for any 'where' constraint), the final 'opts' value, the joins, the field path traveled to generate the joins, and a transform function that takes a field and alias and is equivalent to `field.get_col(alias)` in the simple case but wraps field transforms if they were included in names. The target field is the field containing the concrete value. Final field can be something different, for example foreign key pointing to that value. Final field is needed for example in some value conversions (convert 'obj' in fk__id=obj to pk val using the foreign key field for example). """ joins = [alias] # The transform can't be applied yet, as joins must be trimmed later. # To avoid making every caller of this method look up transforms # directly, compute transforms here and create a partial that converts # fields to the appropriate wrapped version. def final_transformer(field, alias): return field.get_col(alias) # Try resolving all the names as fields first. If there's an error, # treat trailing names as lookups until a field can be resolved. last_field_exception = None for pivot in range(len(names), 0, -1): try: path, final_field, targets, rest = self.names_to_path( names[:pivot], opts, allow_many, fail_on_missing=True, ) except FieldError as exc: if pivot == 1: # The first item cannot be a lookup, so it's safe # to raise the field error here. raise else: last_field_exception = exc else: # The transforms are the remaining items that couldn't be # resolved into fields. transforms = names[pivot:] break for name in transforms: def transform(field, alias, *, name, previous): try: wrapped = previous(field, alias) return self.try_transform(wrapped, name) except FieldError: # FieldError is raised if the transform doesn't exist. if isinstance(final_field, Field) and last_field_exception: raise last_field_exception else: raise final_transformer = functools.partial(transform, name=name, previous=final_transformer) # Then, add the path to the query's joins. Note that we can't trim # joins at this stage - we will need the information about join type # of the trimmed joins. for join in path: if join.filtered_relation: filtered_relation = join.filtered_relation.clone() table_alias = filtered_relation.alias else: filtered_relation = None table_alias = None opts = join.to_opts if join.direct: nullable = self.is_nullable(join.join_field) else: nullable = True connection = Join( opts.db_table, alias, table_alias, INNER, join.join_field, nullable, filtered_relation=filtered_relation, ) reuse = can_reuse if join.m2m or reuse_with_filtered_relation else None alias = self.join( connection, reuse=reuse, reuse_with_filtered_relation=reuse_with_filtered_relation, ) joins.append(alias) if filtered_relation: filtered_relation.path = joins[:] return JoinInfo(final_field, targets, opts, joins, path, final_transformer) def trim_joins(self, targets, joins, path): """ The 'target' parameter is the final field being joined to, 'joins' is the full list of join aliases. The 'path' contain the PathInfos used to create the joins. 
Return the final target field and table alias and the new active joins. Always trim any direct join if the target column is already in the previous table. Can't trim reverse joins as it's unknown if there's anything on the other side of the join. """ joins = joins[:] for pos, info in enumerate(reversed(path)): if len(joins) == 1 or not info.direct: break if info.filtered_relation: break join_targets = {t.column for t in info.join_field.foreign_related_fields} cur_targets = {t.column for t in targets} if not cur_targets.issubset(join_targets): break targets_dict = {r[1].column: r[0] for r in info.join_field.related_fields if r[1].column in cur_targets} targets = tuple(targets_dict[t.column] for t in targets) self.unref_alias(joins.pop()) return targets, joins[-1], joins @classmethod def _gen_col_aliases(cls, exprs): for expr in exprs: if isinstance(expr, Col): yield expr.alias else: yield from cls._gen_col_aliases(expr.get_source_expressions()) def resolve_ref(self, name, allow_joins=True, reuse=None, summarize=False, simple_col=False): if not allow_joins and LOOKUP_SEP in name: raise FieldError("Joined field references are not permitted in this query") annotation = self.annotations.get(name) if annotation is not None: if not allow_joins: for alias in self._gen_col_aliases([annotation]): if isinstance(self.alias_map[alias], Join): raise FieldError( 'Joined field references are not permitted in ' 'this query' ) if summarize: # Summarize currently means we are doing an aggregate() query # which is executed as a wrapped subquery if any of the # aggregate() elements reference an existing annotation. In # that case we need to return a Ref to the subquery's annotation. return Ref(name, self.annotation_select[name]) else: return annotation else: field_list = name.split(LOOKUP_SEP) join_info = self.setup_joins(field_list, self.get_meta(), self.get_initial_alias(), can_reuse=reuse) targets, final_alias, join_list = self.trim_joins(join_info.targets, join_info.joins, join_info.path) if not allow_joins and len(join_list) > 1: raise FieldError('Joined field references are not permitted in this query') if len(targets) > 1: raise FieldError("Referencing multicolumn fields with F() objects " "isn't supported") # Verify that the last lookup in name is a field or a transform: # transform_function() raises FieldError if not. join_info.transform_function(targets[0], final_alias) if reuse is not None: reuse.update(join_list) col = _get_col(targets[0], join_info.targets[0], join_list[-1], simple_col) return col def split_exclude(self, filter_expr, can_reuse, names_with_path): """ When doing an exclude against any kind of N-to-many relation, we need to use a subquery. This method constructs the nested query, given the original exclude filter (filter_expr) and the portion up to the first N-to-many relation field. For example, if the origin filter is ~Q(child__name='foo'), filter_expr is ('child__name', 'foo') and can_reuse is a set of joins usable for filters in the original query. We will turn this into equivalent of: WHERE NOT (pk IN (SELECT parent_id FROM thetable WHERE name = 'foo' AND parent_id IS NOT NULL)) It might be worth it to consider using WHERE NOT EXISTS as that has saner null handling, and is easier for the backend's optimizer to handle. """ filter_lhs, filter_rhs = filter_expr if isinstance(filter_rhs, F): filter_expr = (filter_lhs, OuterRef(filter_rhs.name)) # Generate the inner query. 
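        # Editor's note: e.g. for ~Q(child__name=F('name')), the F() rhs was
        # rewritten above to OuterRef('name') so that the inner query built
        # below can reference the outer row's column.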
query = Query(self.model) query._filtered_relations = self._filtered_relations query.add_filter(filter_expr) query.clear_ordering(True) # Try to have as simple as possible subquery -> trim leading joins from # the subquery. trimmed_prefix, contains_louter = query.trim_start(names_with_path) # Add extra check to make sure the selected field will not be null # since we are adding an IN <subquery> clause. This prevents the # database from tripping over IN (...,NULL,...) selects and returning # nothing col = query.select[0] select_field = col.target alias = col.alias if self.is_nullable(select_field): lookup_class = select_field.get_lookup('isnull') lookup = lookup_class(select_field.get_col(alias), False) query.where.add(lookup, AND) if alias in can_reuse: pk = select_field.model._meta.pk # Need to add a restriction so that outer query's filters are in effect for # the subquery, too. query.bump_prefix(self) lookup_class = select_field.get_lookup('exact') # Note that the query.select[0].alias is different from alias # due to bump_prefix above. lookup = lookup_class(pk.get_col(query.select[0].alias), pk.get_col(alias)) query.where.add(lookup, AND) query.external_aliases.add(alias) condition, needed_inner = self.build_filter( ('%s__in' % trimmed_prefix, query), current_negated=True, branch_negated=True, can_reuse=can_reuse) if contains_louter: or_null_condition, _ = self.build_filter( ('%s__isnull' % trimmed_prefix, True), current_negated=True, branch_negated=True, can_reuse=can_reuse) condition.add(or_null_condition, OR) # Note that the end result will be: # (outercol NOT IN innerq AND outercol IS NOT NULL) OR outercol IS NULL. # This might look crazy but due to how IN works, this seems to be # correct. If the IS NOT NULL check is removed then outercol NOT # IN will return UNKNOWN. If the IS NULL check is removed, then if # outercol IS NULL we will not match the row. return condition, needed_inner def set_empty(self): self.where.add(NothingNode(), AND) def is_empty(self): return any(isinstance(c, NothingNode) for c in self.where.children) def set_limits(self, low=None, high=None): """ Adjust the limits on the rows retrieved. Use low/high to set these, as it makes it more Pythonic to read and write. When the SQL query is created, convert them to the appropriate offset and limit values. Apply any limits passed in here to the existing constraints. Add low to the current low value and clamp both to any existing high value. """ if high is not None: if self.high_mark is not None: self.high_mark = min(self.high_mark, self.low_mark + high) else: self.high_mark = self.low_mark + high if low is not None: if self.high_mark is not None: self.low_mark = min(self.high_mark, self.low_mark + low) else: self.low_mark = self.low_mark + low if self.low_mark == self.high_mark: self.set_empty() def clear_limits(self): """Clear any existing limits.""" self.low_mark, self.high_mark = 0, None @property def is_sliced(self): return self.low_mark != 0 or self.high_mark is not None def has_limit_one(self): return self.high_mark is not None and (self.high_mark - self.low_mark) == 1 def can_filter(self): """ Return True if adding filters to this instance is still possible. Typically, this means no limits or offsets have been put on the results. 
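        For example, qs[:10].filter(x=1) is rejected at the QuerySet level
        because filtering a sliced query would be ambiguous.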
""" return not self.is_sliced def clear_select_clause(self): """Remove all fields from SELECT clause.""" self.select = () self.default_cols = False self.select_related = False self.set_extra_mask(()) self.set_annotation_mask(()) def clear_select_fields(self): """ Clear the list of fields to select (but not extra_select columns). Some queryset types completely replace any existing list of select columns. """ self.select = () self.values_select = () def add_select_col(self, col): self.select += col, self.values_select += col.output_field.name, def set_select(self, cols): self.default_cols = False self.select = tuple(cols) def add_distinct_fields(self, *field_names): """ Add and resolve the given fields to the query's "distinct on" clause. """ self.distinct_fields = field_names self.distinct = True def add_fields(self, field_names, allow_m2m=True): """ Add the given (model) fields to the select set. Add the field names in the order specified. """ alias = self.get_initial_alias() opts = self.get_meta() try: cols = [] for name in field_names: # Join promotion note - we must not remove any rows here, so # if there is no existing joins, use outer join. join_info = self.setup_joins(name.split(LOOKUP_SEP), opts, alias, allow_many=allow_m2m) targets, final_alias, joins = self.trim_joins( join_info.targets, join_info.joins, join_info.path, ) for target in targets: cols.append(join_info.transform_function(target, final_alias)) if cols: self.set_select(cols) except MultiJoin: raise FieldError("Invalid field name: '%s'" % name) except FieldError: if LOOKUP_SEP in name: # For lookups spanning over relationships, show the error # from the model on which the lookup failed. raise else: names = sorted([ *get_field_names_from_opts(opts), *self.extra, *self.annotation_select, *self._filtered_relations ]) raise FieldError("Cannot resolve keyword %r into field. " "Choices are: %s" % (name, ", ".join(names))) def add_ordering(self, *ordering): """ Add items from the 'ordering' sequence to the query's "order by" clause. These items are either field names (not column names) -- possibly with a direction prefix ('-' or '?') -- or OrderBy expressions. If 'ordering' is empty, clear all ordering from the query. """ errors = [] for item in ordering: if not hasattr(item, 'resolve_expression') and not ORDER_PATTERN.match(item): errors.append(item) if getattr(item, 'contains_aggregate', False): raise FieldError( 'Using an aggregate in order_by() without also including ' 'it in annotate() is not allowed: %s' % item ) if errors: raise FieldError('Invalid order_by arguments: %s' % errors) if ordering: self.order_by += ordering else: self.default_ordering = False def clear_ordering(self, force_empty): """ Remove any ordering settings. If 'force_empty' is True, there will be no ordering in the resulting query (not even the model's default). """ self.order_by = () self.extra_order_by = () if force_empty: self.default_ordering = False def set_group_by(self): """ Expand the GROUP BY clause required by the query. This will usually be the set of all non-aggregate fields in the return data. If the database backend supports grouping by the primary key, and the query would be equivalent, the optimization will be made automatically. 
""" group_by = list(self.select) if self.annotation_select: for alias, annotation in self.annotation_select.items(): try: inspect.getcallargs(annotation.get_group_by_cols, alias=alias) except TypeError: annotation_class = annotation.__class__ msg = ( '`alias=None` must be added to the signature of ' '%s.%s.get_group_by_cols().' ) % (annotation_class.__module__, annotation_class.__qualname__) warnings.warn(msg, category=RemovedInDjango40Warning) group_by_cols = annotation.get_group_by_cols() else: group_by_cols = annotation.get_group_by_cols(alias=alias) group_by.extend(group_by_cols) self.group_by = tuple(group_by) def add_select_related(self, fields): """ Set up the select_related data structure so that we only select certain related models (as opposed to all models, when self.select_related=True). """ if isinstance(self.select_related, bool): field_dict = {} else: field_dict = self.select_related for field in fields: d = field_dict for part in field.split(LOOKUP_SEP): d = d.setdefault(part, {}) self.select_related = field_dict def add_extra(self, select, select_params, where, params, tables, order_by): """ Add data to the various extra_* attributes for user-created additions to the query. """ if select: # We need to pair any placeholder markers in the 'select' # dictionary with their parameters in 'select_params' so that # subsequent updates to the select dictionary also adjust the # parameters appropriately. select_pairs = {} if select_params: param_iter = iter(select_params) else: param_iter = iter([]) for name, entry in select.items(): entry = str(entry) entry_params = [] pos = entry.find("%s") while pos != -1: if pos == 0 or entry[pos - 1] != '%': entry_params.append(next(param_iter)) pos = entry.find("%s", pos + 2) select_pairs[name] = (entry, entry_params) self.extra.update(select_pairs) if where or params: self.where.add(ExtraWhere(where, params), AND) if tables: self.extra_tables += tuple(tables) if order_by: self.extra_order_by = order_by def clear_deferred_loading(self): """Remove any fields from the deferred loading set.""" self.deferred_loading = (frozenset(), True) def add_deferred_loading(self, field_names): """ Add the given list of model field names to the set of fields to exclude from loading from the database when automatic column selection is done. Add the new field names to any existing field names that are deferred (or removed from any existing field names that are marked as the only ones for immediate loading). """ # Fields on related models are stored in the literal double-underscore # format, so that we can use a set datastructure. We do the foo__bar # splitting and handling when computing the SQL column names (as part of # get_columns()). existing, defer = self.deferred_loading if defer: # Add to existing deferred names. self.deferred_loading = existing.union(field_names), True else: # Remove names from the set of any existing "immediate load" names. self.deferred_loading = existing.difference(field_names), False def add_immediate_loading(self, field_names): """ Add the given list of model field names to the set of fields to retrieve when the SQL is executed ("immediate loading" fields). The field names replace any existing immediate loading field names. If there are field names already specified for deferred loading, remove those names from the new field_names before storing the new names for immediate loading. (That is, immediate loading overrides any existing immediate values, but respects existing deferrals.) 
""" existing, defer = self.deferred_loading field_names = set(field_names) if 'pk' in field_names: field_names.remove('pk') field_names.add(self.get_meta().pk.name) if defer: # Remove any existing deferred names from the current set before # setting the new names. self.deferred_loading = field_names.difference(existing), False else: # Replace any existing "immediate load" field names. self.deferred_loading = frozenset(field_names), False def get_loaded_field_names(self): """ If any fields are marked to be deferred, return a dictionary mapping models to a set of names in those fields that will be loaded. If a model is not in the returned dictionary, none of its fields are deferred. If no fields are marked for deferral, return an empty dictionary. """ # We cache this because we call this function multiple times # (compiler.fill_related_selections, query.iterator) try: return self._loaded_field_names_cache except AttributeError: collection = {} self.deferred_to_data(collection, self.get_loaded_field_names_cb) self._loaded_field_names_cache = collection return collection def get_loaded_field_names_cb(self, target, model, fields): """Callback used by get_deferred_field_names().""" target[model] = {f.attname for f in fields} def set_annotation_mask(self, names): """Set the mask of annotations that will be returned by the SELECT.""" if names is None: self.annotation_select_mask = None else: self.annotation_select_mask = set(names) self._annotation_select_cache = None def append_annotation_mask(self, names): if self.annotation_select_mask is not None: self.set_annotation_mask(self.annotation_select_mask.union(names)) def set_extra_mask(self, names): """ Set the mask of extra select items that will be returned by SELECT. Don't remove them from the Query since they might be used later. """ if names is None: self.extra_select_mask = None else: self.extra_select_mask = set(names) self._extra_select_cache = None def set_values(self, fields): self.select_related = False self.clear_deferred_loading() self.clear_select_fields() if self.group_by is True: self.add_fields((f.attname for f in self.model._meta.concrete_fields), False) self.set_group_by() self.clear_select_fields() if fields: field_names = [] extra_names = [] annotation_names = [] if not self.extra and not self.annotations: # Shortcut - if there are no extra or annotations, then # the values() clause must be just field names. field_names = list(fields) else: self.default_cols = False for f in fields: if f in self.extra_select: extra_names.append(f) elif f in self.annotation_select: annotation_names.append(f) else: field_names.append(f) self.set_extra_mask(extra_names) self.set_annotation_mask(annotation_names) else: field_names = [f.attname for f in self.model._meta.concrete_fields] self.values_select = tuple(field_names) self.add_fields(field_names, True) @property def annotation_select(self): """ Return the dictionary of aggregate columns that are not masked and should be used in the SELECT clause. Cache this result for performance. 
""" if self._annotation_select_cache is not None: return self._annotation_select_cache elif not self.annotations: return {} elif self.annotation_select_mask is not None: self._annotation_select_cache = { k: v for k, v in self.annotations.items() if k in self.annotation_select_mask } return self._annotation_select_cache else: return self.annotations @property def extra_select(self): if self._extra_select_cache is not None: return self._extra_select_cache if not self.extra: return {} elif self.extra_select_mask is not None: self._extra_select_cache = { k: v for k, v in self.extra.items() if k in self.extra_select_mask } return self._extra_select_cache else: return self.extra def trim_start(self, names_with_path): """ Trim joins from the start of the join path. The candidates for trim are the PathInfos in names_with_path structure that are m2m joins. Also set the select column so the start matches the join. This method is meant to be used for generating the subquery joins & cols in split_exclude(). Return a lookup usable for doing outerq.filter(lookup=self) and a boolean indicating if the joins in the prefix contain a LEFT OUTER join. _""" all_paths = [] for _, paths in names_with_path: all_paths.extend(paths) contains_louter = False # Trim and operate only on tables that were generated for # the lookup part of the query. That is, avoid trimming # joins generated for F() expressions. lookup_tables = [ t for t in self.alias_map if t in self._lookup_joins or t == self.base_table ] for trimmed_paths, path in enumerate(all_paths): if path.m2m: break if self.alias_map[lookup_tables[trimmed_paths + 1]].join_type == LOUTER: contains_louter = True alias = lookup_tables[trimmed_paths] self.unref_alias(alias) # The path.join_field is a Rel, lets get the other side's field join_field = path.join_field.field # Build the filter prefix. paths_in_prefix = trimmed_paths trimmed_prefix = [] for name, path in names_with_path: if paths_in_prefix - len(path) < 0: break trimmed_prefix.append(name) paths_in_prefix -= len(path) trimmed_prefix.append( join_field.foreign_related_fields[0].name) trimmed_prefix = LOOKUP_SEP.join(trimmed_prefix) # Lets still see if we can trim the first join from the inner query # (that is, self). We can't do this for: # - LEFT JOINs because we would miss those rows that have nothing on # the outer side, # - INNER JOINs from filtered relations because we would miss their # filters. first_join = self.alias_map[lookup_tables[trimmed_paths + 1]] if first_join.join_type != LOUTER and not first_join.filtered_relation: select_fields = [r[0] for r in join_field.related_fields] select_alias = lookup_tables[trimmed_paths + 1] self.unref_alias(lookup_tables[trimmed_paths]) extra_restriction = join_field.get_extra_restriction( self.where_class, None, lookup_tables[trimmed_paths + 1]) if extra_restriction: self.where.add(extra_restriction, AND) else: # TODO: It might be possible to trim more joins from the start of the # inner query if it happens to have a longer join chain containing the # values in select_fields. Lets punt this one for now. select_fields = [r[1] for r in join_field.related_fields] select_alias = lookup_tables[trimmed_paths] # The found starting point is likely a Join instead of a BaseTable reference. # But the first entry in the query's FROM clause must not be a JOIN. 
for table in self.alias_map: if self.alias_refcount[table] > 0: self.alias_map[table] = BaseTable(self.alias_map[table].table_name, table) break self.set_select([f.get_col(select_alias) for f in select_fields]) return trimmed_prefix, contains_louter def is_nullable(self, field): """ Check if the given field should be treated as nullable. Some backends treat '' as null and Django treats such fields as nullable for those backends. In such situations field.null can be False even if we should treat the field as nullable. """ # We need to use DEFAULT_DB_ALIAS here, as QuerySet does not have # (nor should it have) knowledge of which connection is going to be # used. The proper fix would be to defer all decisions where # is_nullable() is needed to the compiler stage, but that is not easy # to do currently. return ( connections[DEFAULT_DB_ALIAS].features.interprets_empty_strings_as_nulls and field.empty_strings_allowed ) or field.null def get_order_dir(field, default='ASC'): """ Return the field name and direction for an order specification. For example, '-foo' is returned as ('foo', 'DESC'). The 'default' param is used to indicate which way no prefix (or a '+' prefix) should sort. The '-' prefix always sorts the opposite way. """ dirn = ORDER_DIR[default] if field[0] == '-': return field[1:], dirn[1] return field, dirn[0] def add_to_dict(data, key, value): """ Add "value" to the set of values for "key", whether or not "key" already exists. """ if key in data: data[key].add(value) else: data[key] = {value} def is_reverse_o2o(field): """ Check if the given field is reverse-o2o. The field is expected to be some sort of relation field or related object. """ return field.is_relation and field.one_to_one and not field.concrete class JoinPromoter: """ A class to abstract away join promotion problems for complex filter conditions. """ def __init__(self, connector, num_children, negated): self.connector = connector self.negated = negated if self.negated: if connector == AND: self.effective_connector = OR else: self.effective_connector = AND else: self.effective_connector = self.connector self.num_children = num_children # Maps of table alias to how many times it is seen as required for # inner and/or outer joins. self.votes = Counter() def add_votes(self, votes): """ Add single vote per item to self.votes. Parameter can be any iterable. """ self.votes.update(votes) def update_join_types(self, query): """ Change join types so that the generated query is as efficient as possible, but still correct. So, change as many joins as possible to INNER, but don't make OUTER joins INNER if that could remove results from the query. """ to_promote = set() to_demote = set() # The effective_connector is used so that NOT (a AND b) is treated # similarly to (a OR b) for join promotion. for table, votes in self.votes.items(): # We must use outer joins in OR case when the join isn't contained # in all of the joins. Otherwise the INNER JOIN itself could remove # valid results. Consider the case where a model with rel_a and # rel_b relations is queried with rel_a__col=1 | rel_b__col=2. Now, # if rel_a join doesn't produce any results is null (for example # reverse foreign key or null value in direct foreign key), and # there is a matching row in rel_b with col=2, then an INNER join # to rel_a would remove a valid match from the query. So, we need # to promote any existing INNER to LOUTER (it is possible this # promotion in turn will be demoted later on). 
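            # Editor's note: e.g. rel_a__col=1 | rel_b__col=2 gives each
            # alias one vote out of num_children == 2, so the check below
            # promotes both joins.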
if self.effective_connector == 'OR' and votes < self.num_children: to_promote.add(table) # If connector is AND and there is a filter that can match only # when there is a joinable row, then use INNER. For example, in # rel_a__col=1 & rel_b__col=2, if either of the rels produce NULL # as join output, then the col=1 or col=2 can't match (as # NULL=anything is always false). # For the OR case, if all children voted for a join to be inner, # then we can use INNER for the join. For example: # (rel_a__col__icontains=Alex | rel_a__col__icontains=Russell) # then if rel_a doesn't produce any rows, the whole condition # can't match. Hence we can safely use INNER join. if self.effective_connector == 'AND' or ( self.effective_connector == 'OR' and votes == self.num_children): to_demote.add(table) # Finally, what happens in cases where we have: # (rel_a__col=1|rel_b__col=2) & rel_a__col__gte=0 # Now, we first generate the OR clause, and promote joins for it # in the first if branch above. Both rel_a and rel_b are promoted # to LOUTER joins. After that we do the AND case. The OR case # voted no inner joins but the rel_a__col__gte=0 votes inner join # for rel_a. We demote it back to INNER join (in AND case a single # vote is enough). The demotion is OK, if rel_a doesn't produce # rows, then the rel_a__col__gte=0 clause can't be true, and thus # the whole clause must be false. So, it is safe to use INNER # join. # Note that in this example we could just as well have the __gte # clause and the OR clause swapped. Or we could replace the __gte # clause with an OR clause containing rel_a__col=1|rel_a__col=2, # and again we could safely demote to INNER. query.promote_joins(to_promote) query.demote_joins(to_demote) return to_demote
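# Editor's sketch (hypothetical models) of how the pieces above cooperate:
#
#     Blog.objects.filter(Q(entry__rating=5) | Q(author__age=30))
#
# _add_q() builds one filter clause per child and votes its inner joins;
# JoinPromoter.update_join_types() then promotes the entry and author joins
# to LEFT OUTER because neither alias was voted for by all children of the OR.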
import collections import re import warnings from itertools import chain from django.core.exceptions import EmptyResultSet, FieldError from django.db.models.constants import LOOKUP_SEP from django.db.models.expressions import OrderBy, Random, RawSQL, Ref, Value from django.db.models.functions import Cast from django.db.models.query_utils import QueryWrapper, select_related_descend from django.db.models.sql.constants import ( CURSOR, GET_ITERATOR_CHUNK_SIZE, MULTI, NO_RESULTS, ORDER_DIR, SINGLE, ) from django.db.models.sql.query import Query, get_order_dir from django.db.transaction import TransactionManagementError from django.db.utils import DatabaseError, NotSupportedError from django.utils.deprecation import RemovedInDjango31Warning from django.utils.hashable import make_hashable class SQLCompiler: def __init__(self, query, connection, using): self.query = query self.connection = connection self.using = using self.quote_cache = {'*': '*'} # The select, klass_info, and annotations are needed by QuerySet.iterator() # these are set as a side-effect of executing the query. Note that we calculate # separately a list of extra select columns needed for grammatical correctness # of the query, but these columns are not included in self.select. self.select = None self.annotation_col_map = None self.klass_info = None # Multiline ordering SQL clause may appear from RawSQL. self.ordering_parts = re.compile(r'^(.*)\s(ASC|DESC)(.*)', re.MULTILINE | re.DOTALL) self._meta_ordering = None def setup_query(self): if all(self.query.alias_refcount[a] == 0 for a in self.query.alias_map): self.query.get_initial_alias() self.select, self.klass_info, self.annotation_col_map = self.get_select() self.col_count = len(self.select) def pre_sql_setup(self): """ Do any necessary class setup immediately prior to producing SQL. This is for things that can't necessarily be done in __init__ because we might not have all the pieces in place at that time. """ self.setup_query() order_by = self.get_order_by() self.where, self.having = self.query.where.split_having() extra_select = self.get_extra_select(order_by, self.select) self.has_extra_select = bool(extra_select) group_by = self.get_group_by(self.select + extra_select, order_by) return extra_select, order_by, group_by def get_group_by(self, select, order_by): """ Return a list of 2-tuples of form (sql, params). The logic of what exactly the GROUP BY clause contains is hard to describe in other words than "if it passes the test suite, then it is correct". """ # Some examples: # SomeModel.objects.annotate(Count('somecol')) # GROUP BY: all fields of the model # # SomeModel.objects.values('name').annotate(Count('somecol')) # GROUP BY: name # # SomeModel.objects.annotate(Count('somecol')).values('name') # GROUP BY: all cols of the model # # SomeModel.objects.values('name', 'pk').annotate(Count('somecol')).values('pk') # GROUP BY: name, pk # # SomeModel.objects.values('name').annotate(Count('somecol')).values('pk') # GROUP BY: name, pk # # In fact, the self.query.group_by is the minimal set to GROUP BY. It # can't be ever restricted to a smaller set, but additional columns in # HAVING, ORDER BY, and SELECT clauses are added to it. 
Unfortunately # the end result is that it is impossible to force the query to have # a chosen GROUP BY clause - you can almost do this by using the form: # .values(*wanted_cols).annotate(AnAggregate()) # but any later annotations, extra selects, values calls that # refer some column outside of the wanted_cols, order_by, or even # filter calls can alter the GROUP BY clause. # The query.group_by is either None (no GROUP BY at all), True # (group by select fields), or a list of expressions to be added # to the group by. if self.query.group_by is None: return [] expressions = [] if self.query.group_by is not True: # If the group by is set to a list (by .values() call most likely), # then we need to add everything in it to the GROUP BY clause. # Backwards compatibility hack for setting query.group_by. Remove # when we have public API way of forcing the GROUP BY clause. # Converts string references to expressions. for expr in self.query.group_by: if not hasattr(expr, 'as_sql'): expressions.append(self.query.resolve_ref(expr)) else: expressions.append(expr) # Note that even if the group_by is set, it is only the minimal # set to group by. So, we need to add cols in select, order_by, and # having into the select in any case. for expr, _, _ in select: cols = expr.get_group_by_cols() for col in cols: expressions.append(col) for expr, (sql, params, is_ref) in order_by: # Skip References to the select clause, as all expressions in the # select clause are already part of the group by. if not expr.contains_aggregate and not is_ref: expressions.extend(expr.get_source_expressions()) having_group_by = self.having.get_group_by_cols() if self.having else () for expr in having_group_by: expressions.append(expr) result = [] seen = set() expressions = self.collapse_group_by(expressions, having_group_by) for expr in expressions: sql, params = self.compile(expr) params_hash = make_hashable(params) if (sql, params_hash) not in seen: result.append((sql, params)) seen.add((sql, params_hash)) return result def collapse_group_by(self, expressions, having): # If the DB can group by primary key, then group by the primary key of # query's main model. Note that for PostgreSQL the GROUP BY clause must # include the primary key of every table, but for MySQL it is enough to # have the main table's primary key. if self.connection.features.allows_group_by_pk: # Determine if the main model's primary key is in the query. pk = None for expr in expressions: # Is this a reference to query's base table primary key? If the # expression isn't a Col-like, then skip the expression. if (getattr(expr, 'target', None) == self.query.model._meta.pk and getattr(expr, 'alias', None) == self.query.base_table): pk = expr break # If the main model's primary key is in the query, group by that # field, HAVING expressions, and expressions associated with tables # that don't have a primary key included in the grouped columns. if pk: pk_aliases = { expr.alias for expr in expressions if hasattr(expr, 'target') and expr.target.primary_key } expressions = [pk] + [ expr for expr in expressions if expr in having or ( getattr(expr, 'alias', None) is not None and expr.alias not in pk_aliases ) ] elif self.connection.features.allows_group_by_selected_pks: # Filter out all expressions associated with a table's primary key # present in the grouped columns. This is done by identifying all # tables that have their primary key included in the grouped # columns and removing non-primary key columns referring to them. 
# Unmanaged models are excluded because they could be representing # database views on which the optimization might not be allowed. pks = { expr for expr in expressions if hasattr(expr, 'target') and expr.target.primary_key and expr.target.model._meta.managed } aliases = {expr.alias for expr in pks} expressions = [ expr for expr in expressions if expr in pks or getattr(expr, 'alias', None) not in aliases ] return expressions def get_select(self): """ Return three values: - a list of 3-tuples of (expression, (sql, params), alias) - a klass_info structure, - a dictionary of annotations The (sql, params) is what the expression will produce, and alias is the "AS alias" for the column (possibly None). The klass_info structure contains the following information: - The base model of the query. - Which columns for that model are present in the query (by position of the select clause). - related_klass_infos: [f, klass_info] to descent into The annotations is a dictionary of {'attname': column position} values. """ select = [] klass_info = None annotations = {} select_idx = 0 for alias, (sql, params) in self.query.extra_select.items(): annotations[alias] = select_idx select.append((RawSQL(sql, params), alias)) select_idx += 1 assert not (self.query.select and self.query.default_cols) if self.query.default_cols: cols = self.get_default_columns() else: # self.query.select is a special case. These columns never go to # any model. cols = self.query.select if cols: select_list = [] for col in cols: select_list.append(select_idx) select.append((col, None)) select_idx += 1 klass_info = { 'model': self.query.model, 'select_fields': select_list, } for alias, annotation in self.query.annotation_select.items(): annotations[alias] = select_idx select.append((annotation, alias)) select_idx += 1 if self.query.select_related: related_klass_infos = self.get_related_selections(select) klass_info['related_klass_infos'] = related_klass_infos def get_select_from_parent(klass_info): for ki in klass_info['related_klass_infos']: if ki['from_parent']: ki['select_fields'] = (klass_info['select_fields'] + ki['select_fields']) get_select_from_parent(ki) get_select_from_parent(klass_info) ret = [] for col, alias in select: try: sql, params = self.compile(col) except EmptyResultSet: # Select a predicate that's always False. sql, params = '0', () else: sql, params = col.select_format(self, sql, params) ret.append((col, (sql, params), alias)) return ret, klass_info, annotations def get_order_by(self): """ Return a list of 2-tuples of form (expr, (sql, params, is_ref)) for the ORDER BY clause. The order_by clause can alter the select clause (for example it can add aliases to clauses that do not yet have one, or it can add totally new select clauses). """ if self.query.extra_order_by: ordering = self.query.extra_order_by elif not self.query.default_ordering: ordering = self.query.order_by elif self.query.order_by: ordering = self.query.order_by elif self.query.get_meta().ordering: ordering = self.query.get_meta().ordering self._meta_ordering = ordering else: ordering = [] if self.query.standard_ordering: asc, desc = ORDER_DIR['ASC'] else: asc, desc = ORDER_DIR['DESC'] order_by = [] for field in ordering: if hasattr(field, 'resolve_expression'): if isinstance(field, Value): # output_field must be resolved for constants. 
field = Cast(field, field.output_field) if not isinstance(field, OrderBy): field = field.asc() if not self.query.standard_ordering: field = field.copy() field.reverse_ordering() order_by.append((field, False)) continue if field == '?': # random order_by.append((OrderBy(Random()), False)) continue col, order = get_order_dir(field, asc) descending = order == 'DESC' if col in self.query.annotation_select: # Reference to expression in SELECT clause order_by.append(( OrderBy(Ref(col, self.query.annotation_select[col]), descending=descending), True)) continue if col in self.query.annotations: # References to an expression which is masked out of the SELECT # clause. expr = self.query.annotations[col] if isinstance(expr, Value): # output_field must be resolved for constants. expr = Cast(expr, expr.output_field) order_by.append((OrderBy(expr, descending=descending), False)) continue if '.' in field: # This came in through an extra(order_by=...) addition. Pass it # on verbatim. table, col = col.split('.', 1) order_by.append(( OrderBy( RawSQL('%s.%s' % (self.quote_name_unless_alias(table), col), []), descending=descending ), False)) continue if not self.query.extra or col not in self.query.extra: # 'col' is of the form 'field' or 'field1__field2' or # '-field1__field2__field', etc. order_by.extend(self.find_ordering_name( field, self.query.get_meta(), default_order=asc)) else: if col not in self.query.extra_select: order_by.append(( OrderBy(RawSQL(*self.query.extra[col]), descending=descending), False)) else: order_by.append(( OrderBy(Ref(col, RawSQL(*self.query.extra[col])), descending=descending), True)) result = [] seen = set() for expr, is_ref in order_by: resolved = expr.resolve_expression(self.query, allow_joins=True, reuse=None) if self.query.combinator: src = resolved.get_source_expressions()[0] # Relabel order by columns to raw numbers if this is a combined # query; necessary since the columns can't be referenced by the # fully qualified name and the simple column names may collide. for idx, (sel_expr, _, col_alias) in enumerate(self.select): if is_ref and col_alias == src.refs: src = src.source elif col_alias: continue if src == sel_expr: resolved.set_source_expressions([RawSQL('%d' % (idx + 1), ())]) break else: if col_alias: raise DatabaseError('ORDER BY term does not match any column in the result set.') # Add column used in ORDER BY clause without an alias to # the selected columns. self.query.add_select_col(src) resolved.set_source_expressions([RawSQL('%d' % len(self.query.select), ())]) sql, params = self.compile(resolved) # Don't add the same column twice, but the order direction is # not taken into account so we strip it. When this entire method # is refactored into expressions, then we can check each part as we # generate it. 
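            # For example (illustrative): 'name ASC' and 'name DESC' both
            # reduce to 'name' once the direction is stripped below, so only
            # the first occurrence is kept.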
without_ordering = self.ordering_parts.search(sql).group(1) params_hash = make_hashable(params) if (without_ordering, params_hash) in seen: continue seen.add((without_ordering, params_hash)) result.append((resolved, (sql, params, is_ref))) return result def get_extra_select(self, order_by, select): extra_select = [] if self.query.distinct and not self.query.distinct_fields: select_sql = [t[1] for t in select] for expr, (sql, params, is_ref) in order_by: without_ordering = self.ordering_parts.search(sql).group(1) if not is_ref and (without_ordering, params) not in select_sql: extra_select.append((expr, (without_ordering, params), None)) return extra_select def quote_name_unless_alias(self, name): """ A wrapper around connection.ops.quote_name that doesn't quote aliases for table names. This avoids problems with some SQL dialects that treat quoted strings specially (e.g. PostgreSQL). """ if name in self.quote_cache: return self.quote_cache[name] if ((name in self.query.alias_map and name not in self.query.table_map) or name in self.query.extra_select or ( name in self.query.external_aliases and name not in self.query.table_map)): self.quote_cache[name] = name return name r = self.connection.ops.quote_name(name) self.quote_cache[name] = r return r def compile(self, node): vendor_impl = getattr(node, 'as_' + self.connection.vendor, None) if vendor_impl: sql, params = vendor_impl(self, self.connection) else: sql, params = node.as_sql(self, self.connection) return sql, params def get_combinator_sql(self, combinator, all): features = self.connection.features compilers = [ query.get_compiler(self.using, self.connection) for query in self.query.combined_queries if not query.is_empty() ] if not features.supports_slicing_ordering_in_compound: for query, compiler in zip(self.query.combined_queries, compilers): if query.low_mark or query.high_mark: raise DatabaseError('LIMIT/OFFSET not allowed in subqueries of compound statements.') if compiler.get_order_by(): raise DatabaseError('ORDER BY not allowed in subqueries of compound statements.') parts = () for compiler in compilers: try: # If the columns list is limited, then all combined queries # must have the same columns list. Set the selects defined on # the query on all combined queries, if not already set. if not compiler.query.values_select and self.query.values_select: compiler.query = compiler.query.clone() compiler.query.set_values(( *self.query.extra_select, *self.query.values_select, *self.query.annotation_select, )) part_sql, part_args = compiler.as_sql() if compiler.query.combinator: # Wrap in a subquery if wrapping in parentheses isn't # supported. if not features.supports_parentheses_in_compound: part_sql = 'SELECT * FROM ({})'.format(part_sql) # Add parentheses when combining with compound query if not # already added for all compound queries. elif not features.supports_slicing_ordering_in_compound: part_sql = '({})'.format(part_sql) parts += ((part_sql, part_args),) except EmptyResultSet: # Omit the empty queryset with UNION and with DIFFERENCE if the # first queryset is nonempty. 
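                # For example (illustrative): qs1.union(qs2.none()) compiles
                # to just qs1's SQL, because the empty part is skipped here.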
if combinator == 'union' or (combinator == 'difference' and parts): continue raise if not parts: raise EmptyResultSet combinator_sql = self.connection.ops.set_operators[combinator] if all and combinator == 'union': combinator_sql += ' ALL' braces = '({})' if features.supports_slicing_ordering_in_compound else '{}' sql_parts, args_parts = zip(*((braces.format(sql), args) for sql, args in parts)) result = [' {} '.format(combinator_sql).join(sql_parts)] params = [] for part in args_parts: params.extend(part) return result, params def as_sql(self, with_limits=True, with_col_aliases=False): """ Create the SQL for this query. Return the SQL string and list of parameters. If 'with_limits' is False, any limit/offset information is not included in the query. """ refcounts_before = self.query.alias_refcount.copy() try: extra_select, order_by, group_by = self.pre_sql_setup() for_update_part = None # Is a LIMIT/OFFSET clause needed? with_limit_offset = with_limits and (self.query.high_mark is not None or self.query.low_mark) combinator = self.query.combinator features = self.connection.features if combinator: if not getattr(features, 'supports_select_{}'.format(combinator)): raise NotSupportedError('{} is not supported on this database backend.'.format(combinator)) result, params = self.get_combinator_sql(combinator, self.query.combinator_all) else: distinct_fields, distinct_params = self.get_distinct() # This must come after 'select', 'ordering', and 'distinct' # (see docstring of get_from_clause() for details). from_, f_params = self.get_from_clause() where, w_params = self.compile(self.where) if self.where is not None else ("", []) having, h_params = self.compile(self.having) if self.having is not None else ("", []) result = ['SELECT'] params = [] if self.query.distinct: distinct_result, distinct_params = self.connection.ops.distinct_sql( distinct_fields, distinct_params, ) result += distinct_result params += distinct_params out_cols = [] col_idx = 1 for _, (s_sql, s_params), alias in self.select + extra_select: if alias: s_sql = '%s AS %s' % (s_sql, self.connection.ops.quote_name(alias)) elif with_col_aliases: s_sql = '%s AS %s' % (s_sql, 'Col%d' % col_idx) col_idx += 1 params.extend(s_params) out_cols.append(s_sql) result += [', '.join(out_cols), 'FROM', *from_] params.extend(f_params) if self.query.select_for_update and self.connection.features.has_select_for_update: if self.connection.get_autocommit(): raise TransactionManagementError('select_for_update cannot be used outside of a transaction.') if with_limit_offset and not self.connection.features.supports_select_for_update_with_limit: raise NotSupportedError( 'LIMIT/OFFSET is not supported with ' 'select_for_update on this database backend.' ) nowait = self.query.select_for_update_nowait skip_locked = self.query.select_for_update_skip_locked of = self.query.select_for_update_of # If it's a NOWAIT/SKIP LOCKED/OF query but the backend # doesn't support it, raise NotSupportedError to prevent a # possible deadlock. 
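                # For example (illustrative):
                # qs.select_for_update(nowait=True) fails loudly here on a
                # backend without has_select_for_update_nowait instead of
                # silently blocking on locked rows.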
if nowait and not self.connection.features.has_select_for_update_nowait: raise NotSupportedError('NOWAIT is not supported on this database backend.') elif skip_locked and not self.connection.features.has_select_for_update_skip_locked: raise NotSupportedError('SKIP LOCKED is not supported on this database backend.') elif of and not self.connection.features.has_select_for_update_of: raise NotSupportedError('FOR UPDATE OF is not supported on this database backend.') for_update_part = self.connection.ops.for_update_sql( nowait=nowait, skip_locked=skip_locked, of=self.get_select_for_update_of_arguments(), ) if for_update_part and self.connection.features.for_update_after_from: result.append(for_update_part) if where: result.append('WHERE %s' % where) params.extend(w_params) grouping = [] for g_sql, g_params in group_by: grouping.append(g_sql) params.extend(g_params) if grouping: if distinct_fields: raise NotImplementedError('annotate() + distinct(fields) is not implemented.') order_by = order_by or self.connection.ops.force_no_ordering() result.append('GROUP BY %s' % ', '.join(grouping)) if self._meta_ordering: # When the deprecation ends, replace with: # order_by = None warnings.warn( "%s QuerySet won't use Meta.ordering in Django 3.1. " "Add .order_by(%s) to retain the current query." % ( self.query.model.__name__, ', '.join(repr(f) for f in self._meta_ordering), ), RemovedInDjango31Warning, stacklevel=4, ) if having: result.append('HAVING %s' % having) params.extend(h_params) if self.query.explain_query: result.insert(0, self.connection.ops.explain_query_prefix( self.query.explain_format, **self.query.explain_options )) if order_by: ordering = [] for _, (o_sql, o_params, _) in order_by: ordering.append(o_sql) params.extend(o_params) result.append('ORDER BY %s' % ', '.join(ordering)) if with_limit_offset: result.append(self.connection.ops.limit_offset_sql(self.query.low_mark, self.query.high_mark)) if for_update_part and not self.connection.features.for_update_after_from: result.append(for_update_part) if self.query.subquery and extra_select: # If the query is used as a subquery, the extra selects would # result in more columns than the left-hand side expression is # expecting. This can happen when a subquery uses a combination # of order_by() and distinct(), forcing the ordering expressions # to be selected as well. Wrap the query in another subquery # to exclude extraneous selects. sub_selects = [] sub_params = [] for index, (select, _, alias) in enumerate(self.select, start=1): if not alias and with_col_aliases: alias = 'col%d' % index if alias: sub_selects.append("%s.%s" % ( self.connection.ops.quote_name('subquery'), self.connection.ops.quote_name(alias), )) else: select_clone = select.relabeled_clone({select.alias: 'subquery'}) subselect, subparams = select_clone.as_sql(self, self.connection) sub_selects.append(subselect) sub_params.extend(subparams) return 'SELECT %s FROM (%s) subquery' % ( ', '.join(sub_selects), ' '.join(result), ), tuple(sub_params + params) return ' '.join(result), tuple(params) finally: # Finally do cleanup - get rid of the joins we created above. self.query.reset_refcounts(refcounts_before) def get_default_columns(self, start_alias=None, opts=None, from_parent=None): """ Compute the default columns for selecting every field in the base model. Will sometimes be called to pull in related models (e.g. via select_related), in which case "opts" and "start_alias" will be given to provide a starting point for the traversal. 
        Return a list of column expressions: one Col per concrete field of the
        model, bound to the appropriate table alias for direct use in the
        SELECT clause.
        """
        result = []
        if opts is None:
            opts = self.query.get_meta()
        only_load = self.deferred_to_columns()
        start_alias = start_alias or self.query.get_initial_alias()
        # The 'seen_models' is used to optimize checking the needed parent
        # alias for a given field. This also includes None -> start_alias to
        # be used by local fields.
        seen_models = {None: start_alias}
        for field in opts.concrete_fields:
            model = field.model._meta.concrete_model
            # A proxy model will have a different model and concrete_model. We
            # will assign None if the field belongs to this model.
            if model == opts.model:
                model = None
            if from_parent and model is not None and issubclass(
                    from_parent._meta.concrete_model, model._meta.concrete_model):
                # Avoid loading data for already loaded parents.
                # We end up here in the case select_related() resolution
                # proceeds from parent model to child model. In that case the
                # parent model data is already present in the SELECT clause,
                # and we want to avoid reloading the same data again.
                continue
            if field.model in only_load and field.attname not in only_load[field.model]:
                continue
            alias = self.query.join_parent_model(opts, model, start_alias, seen_models)
            column = field.get_col(alias)
            result.append(column)
        return result

    def get_distinct(self):
        """
        Return a quoted list of fields to use in the DISTINCT ON part of the
        query.

        This method can alter the tables in the query, and thus it must be
        called before get_from_clause().
        """
        result = []
        params = []
        opts = self.query.get_meta()

        for name in self.query.distinct_fields:
            parts = name.split(LOOKUP_SEP)
            _, targets, alias, joins, path, _, transform_function = self._setup_joins(parts, opts, None)
            targets, alias, _ = self.query.trim_joins(targets, joins, path)
            for target in targets:
                if name in self.query.annotation_select:
                    result.append(name)
                else:
                    r, p = self.compile(transform_function(target, alias))
                    result.append(r)
                    params.append(p)
        return result, params

    def find_ordering_name(self, name, opts, alias=None, default_order='ASC',
                           already_seen=None):
        """
        Return the table alias (the name might be ambiguous, the alias will
        not be) and column name for ordering by the given 'name' parameter.
        The 'name' is of the form 'field1__field2__...__fieldN'.
        """
        name, order = get_order_dir(name, default_order)
        descending = order == 'DESC'
        pieces = name.split(LOOKUP_SEP)
        field, targets, alias, joins, path, opts, transform_function = self._setup_joins(pieces, opts, alias)

        # If we get to this point and the field is a relation to another model,
        # append the default ordering for that model unless the attribute name
        # of the field is specified.
        if field.is_relation and opts.ordering and getattr(field, 'attname', None) != name:
            # Firstly, avoid infinite loops.
already_seen = already_seen or set() join_tuple = tuple(getattr(self.query.alias_map[j], 'join_cols', None) for j in joins) if join_tuple in already_seen: raise FieldError('Infinite loop caused by ordering.') already_seen.add(join_tuple) results = [] for item in opts.ordering: if hasattr(item, 'resolve_expression') and not isinstance(item, OrderBy): item = item.desc() if descending else item.asc() if isinstance(item, OrderBy): results.append((item, False)) continue results.extend(self.find_ordering_name(item, opts, alias, order, already_seen)) return results targets, alias, _ = self.query.trim_joins(targets, joins, path) return [(OrderBy(transform_function(t, alias), descending=descending), False) for t in targets] def _setup_joins(self, pieces, opts, alias): """ Helper method for get_order_by() and get_distinct(). get_ordering() and get_distinct() must produce same target columns on same input, as the prefixes of get_ordering() and get_distinct() must match. Executing SQL where this is not true is an error. """ alias = alias or self.query.get_initial_alias() field, targets, opts, joins, path, transform_function = self.query.setup_joins(pieces, opts, alias) alias = joins[-1] return field, targets, alias, joins, path, opts, transform_function def get_from_clause(self): """ Return a list of strings that are joined together to go after the "FROM" part of the query, as well as a list any extra parameters that need to be included. Subclasses, can override this to create a from-clause via a "select". This should only be called after any SQL construction methods that might change the tables that are needed. This means the select columns, ordering, and distinct must be done first. """ result = [] params = [] for alias in tuple(self.query.alias_map): if not self.query.alias_refcount[alias]: continue try: from_clause = self.query.alias_map[alias] except KeyError: # Extra tables can end up in self.tables, but not in the # alias_map if they aren't in a join. That's OK. We skip them. continue clause_sql, clause_params = self.compile(from_clause) result.append(clause_sql) params.extend(clause_params) for t in self.query.extra_tables: alias, _ = self.query.table_alias(t) # Only add the alias if it's not already present (the table_alias() # call increments the refcount, so an alias refcount of one means # this is the only reference). if alias not in self.query.alias_map or self.query.alias_refcount[alias] == 1: result.append(', %s' % self.quote_name_unless_alias(alias)) return result, params def get_related_selections(self, select, opts=None, root_alias=None, cur_depth=1, requested=None, restricted=None): """ Fill in the information needed for a select_related query. The current depth is measured as the number of connections away from the root model (for example, cur_depth=1 means we are looking at models with direct connections to the root model). """ def _get_field_choices(): direct_choices = (f.name for f in opts.fields if f.is_relation) reverse_choices = ( f.field.related_query_name() for f in opts.related_objects if f.field.unique ) return chain(direct_choices, reverse_choices, self.query._filtered_relations) related_klass_infos = [] if not restricted and cur_depth > self.query.max_depth: # We've recursed far enough; bail out. return related_klass_infos if not opts: opts = self.query.get_meta() root_alias = self.query.get_initial_alias() only_load = self.query.get_loaded_field_names() # Setup for the case when only particular related fields should be # included in the related selection. 
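        # For example (illustrative): select_related('author__publisher')
        # arrives here as requested={'author': {'publisher': {}}}, and only
        # those relations are descended into below.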
fields_found = set() if requested is None: restricted = isinstance(self.query.select_related, dict) if restricted: requested = self.query.select_related def get_related_klass_infos(klass_info, related_klass_infos): klass_info['related_klass_infos'] = related_klass_infos for f in opts.fields: field_model = f.model._meta.concrete_model fields_found.add(f.name) if restricted: next = requested.get(f.name, {}) if not f.is_relation: # If a non-related field is used like a relation, # or if a single non-relational field is given. if next or f.name in requested: raise FieldError( "Non-relational field given in select_related: '%s'. " "Choices are: %s" % ( f.name, ", ".join(_get_field_choices()) or '(none)', ) ) else: next = False if not select_related_descend(f, restricted, requested, only_load.get(field_model)): continue klass_info = { 'model': f.remote_field.model, 'field': f, 'reverse': False, 'local_setter': f.set_cached_value, 'remote_setter': f.remote_field.set_cached_value if f.unique else lambda x, y: None, 'from_parent': False, } related_klass_infos.append(klass_info) select_fields = [] _, _, _, joins, _, _ = self.query.setup_joins( [f.name], opts, root_alias) alias = joins[-1] columns = self.get_default_columns(start_alias=alias, opts=f.remote_field.model._meta) for col in columns: select_fields.append(len(select)) select.append((col, None)) klass_info['select_fields'] = select_fields next_klass_infos = self.get_related_selections( select, f.remote_field.model._meta, alias, cur_depth + 1, next, restricted) get_related_klass_infos(klass_info, next_klass_infos) if restricted: related_fields = [ (o.field, o.related_model) for o in opts.related_objects if o.field.unique and not o.many_to_many ] for f, model in related_fields: if not select_related_descend(f, restricted, requested, only_load.get(model), reverse=True): continue related_field_name = f.related_query_name() fields_found.add(related_field_name) join_info = self.query.setup_joins([related_field_name], opts, root_alias) alias = join_info.joins[-1] from_parent = issubclass(model, opts.model) and model is not opts.model klass_info = { 'model': model, 'field': f, 'reverse': True, 'local_setter': f.remote_field.set_cached_value, 'remote_setter': f.set_cached_value, 'from_parent': from_parent, } related_klass_infos.append(klass_info) select_fields = [] columns = self.get_default_columns( start_alias=alias, opts=model._meta, from_parent=opts.model) for col in columns: select_fields.append(len(select)) select.append((col, None)) klass_info['select_fields'] = select_fields next = requested.get(f.related_query_name(), {}) next_klass_infos = self.get_related_selections( select, model._meta, alias, cur_depth + 1, next, restricted) get_related_klass_infos(klass_info, next_klass_infos) for name in list(requested): # Filtered relations work only on the topmost level. if cur_depth > 1: break if name in self.query._filtered_relations: fields_found.add(name) f, _, join_opts, joins, _, _ = self.query.setup_joins([name], opts, root_alias) model = join_opts.model alias = joins[-1] from_parent = issubclass(model, opts.model) and model is not opts.model def local_setter(obj, from_obj): # Set a reverse fk object when relation is non-empty. 
if from_obj: f.remote_field.set_cached_value(from_obj, obj) def remote_setter(obj, from_obj): setattr(from_obj, name, obj) klass_info = { 'model': model, 'field': f, 'reverse': True, 'local_setter': local_setter, 'remote_setter': remote_setter, 'from_parent': from_parent, } related_klass_infos.append(klass_info) select_fields = [] columns = self.get_default_columns( start_alias=alias, opts=model._meta, from_parent=opts.model, ) for col in columns: select_fields.append(len(select)) select.append((col, None)) klass_info['select_fields'] = select_fields next_requested = requested.get(name, {}) next_klass_infos = self.get_related_selections( select, opts=model._meta, root_alias=alias, cur_depth=cur_depth + 1, requested=next_requested, restricted=restricted, ) get_related_klass_infos(klass_info, next_klass_infos) fields_not_found = set(requested).difference(fields_found) if fields_not_found: invalid_fields = ("'%s'" % s for s in fields_not_found) raise FieldError( 'Invalid field name(s) given in select_related: %s. ' 'Choices are: %s' % ( ', '.join(invalid_fields), ', '.join(_get_field_choices()) or '(none)', ) ) return related_klass_infos def get_select_for_update_of_arguments(self): """ Return a quoted list of arguments for the SELECT FOR UPDATE OF part of the query. """ def _get_field_choices(): """Yield all allowed field paths in breadth-first search order.""" queue = collections.deque([(None, self.klass_info)]) while queue: parent_path, klass_info = queue.popleft() if parent_path is None: path = [] yield 'self' else: field = klass_info['field'] if klass_info['reverse']: field = field.remote_field path = parent_path + [field.name] yield LOOKUP_SEP.join(path) queue.extend( (path, klass_info) for klass_info in klass_info.get('related_klass_infos', []) ) result = [] invalid_names = [] for name in self.query.select_for_update_of: parts = [] if name == 'self' else name.split(LOOKUP_SEP) klass_info = self.klass_info for part in parts: for related_klass_info in klass_info.get('related_klass_infos', []): field = related_klass_info['field'] if related_klass_info['reverse']: field = field.remote_field if field.name == part: klass_info = related_klass_info break else: klass_info = None break if klass_info is None: invalid_names.append(name) continue select_index = klass_info['select_fields'][0] col = self.select[select_index][0] if self.connection.features.select_for_update_of_column: result.append(self.compile(col)[0]) else: result.append(self.quote_name_unless_alias(col.alias)) if invalid_names: raise FieldError( 'Invalid field name(s) given in select_for_update(of=(...)): %s. ' 'Only relational fields followed in the query are allowed. ' 'Choices are: %s.' % ( ', '.join(invalid_names), ', '.join(_get_field_choices()), ) ) return result def deferred_to_columns(self): """ Convert the self.deferred_loading data structure to mapping of table names to sets of column names which are to be loaded. Return the dictionary. 
""" columns = {} self.query.deferred_to_data(columns, self.query.get_loaded_field_names_cb) return columns def get_converters(self, expressions): converters = {} for i, expression in enumerate(expressions): if expression: backend_converters = self.connection.ops.get_db_converters(expression) field_converters = expression.get_db_converters(self.connection) if backend_converters or field_converters: converters[i] = (backend_converters + field_converters, expression) return converters def apply_converters(self, rows, converters): connection = self.connection converters = list(converters.items()) for row in map(list, rows): for pos, (convs, expression) in converters: value = row[pos] for converter in convs: value = converter(value, expression, connection) row[pos] = value yield row def results_iter(self, results=None, tuple_expected=False, chunked_fetch=False, chunk_size=GET_ITERATOR_CHUNK_SIZE): """Return an iterator over the results from executing this query.""" if results is None: results = self.execute_sql(MULTI, chunked_fetch=chunked_fetch, chunk_size=chunk_size) fields = [s[0] for s in self.select[0:self.col_count]] converters = self.get_converters(fields) rows = chain.from_iterable(results) if converters: rows = self.apply_converters(rows, converters) if tuple_expected: rows = map(tuple, rows) return rows def has_results(self): """ Backends (e.g. NoSQL) can override this in order to use optimized versions of "query has any results." """ # This is always executed on a query clone, so we can modify self.query self.query.add_extra({'a': 1}, None, None, None, None, None) self.query.set_extra_mask(['a']) return bool(self.execute_sql(SINGLE)) def execute_sql(self, result_type=MULTI, chunked_fetch=False, chunk_size=GET_ITERATOR_CHUNK_SIZE): """ Run the query against the database and return the result(s). The return value is a single data item if result_type is SINGLE, or an iterator over the results if the result_type is MULTI. result_type is either MULTI (use fetchmany() to retrieve all rows), SINGLE (only retrieve a single row), or None. In this last case, the cursor is returned if any query is executed, since it's used by subclasses such as InsertQuery). It's possible, however, that no query is needed, as the filters describe an empty set. In that case, None is returned, to avoid any unnecessary database interaction. """ result_type = result_type or NO_RESULTS try: sql, params = self.as_sql() if not sql: raise EmptyResultSet except EmptyResultSet: if result_type == MULTI: return iter([]) else: return if chunked_fetch: cursor = self.connection.chunked_cursor() else: cursor = self.connection.cursor() try: cursor.execute(sql, params) except Exception: # Might fail for server-side cursors (e.g. connection closed) cursor.close() raise if result_type == CURSOR: # Give the caller the cursor to process and close. return cursor if result_type == SINGLE: try: val = cursor.fetchone() if val: return val[0:self.col_count] return val finally: # done with the cursor cursor.close() if result_type == NO_RESULTS: cursor.close() return result = cursor_iter( cursor, self.connection.features.empty_fetchmany_value, self.col_count if self.has_extra_select else None, chunk_size, ) if not chunked_fetch or not self.connection.features.can_use_chunked_reads: try: # If we are using non-chunked reads, we return the same data # structure as normally, but ensure it is all read into memory # before going any further. Use chunked_fetch if requested, # unless the database doesn't support it. 
return list(result) finally: # done with the cursor cursor.close() return result def as_subquery_condition(self, alias, columns, compiler): qn = compiler.quote_name_unless_alias qn2 = self.connection.ops.quote_name for index, select_col in enumerate(self.query.select): lhs_sql, lhs_params = self.compile(select_col) rhs = '%s.%s' % (qn(alias), qn2(columns[index])) self.query.where.add( QueryWrapper('%s = %s' % (lhs_sql, rhs), lhs_params), 'AND') sql, params = self.as_sql() return 'EXISTS (%s)' % sql, params def explain_query(self): result = list(self.execute_sql()) # Some backends return 1 item tuples with strings, and others return # tuples with integers and strings. Flatten them out into strings. for row in result[0]: if not isinstance(row, str): yield ' '.join(str(c) for c in row) else: yield row class SQLInsertCompiler(SQLCompiler): return_id = False def field_as_sql(self, field, val): """ Take a field and a value intended to be saved on that field, and return placeholder SQL and accompanying params. Check for raw values, expressions, and fields with get_placeholder() defined in that order. When field is None, consider the value raw and use it as the placeholder, with no corresponding parameters returned. """ if field is None: # A field value of None means the value is raw. sql, params = val, [] elif hasattr(val, 'as_sql'): # This is an expression, let's compile it. sql, params = self.compile(val) elif hasattr(field, 'get_placeholder'): # Some fields (e.g. geo fields) need special munging before # they can be inserted. sql, params = field.get_placeholder(val, self, self.connection), [val] else: # Return the common case for the placeholder sql, params = '%s', [val] # The following hook is only used by Oracle Spatial, which sometimes # needs to yield 'NULL' and [] as its placeholder and params instead # of '%s' and [None]. The 'NULL' placeholder is produced earlier by # OracleOperations.get_geom_placeholder(). The following line removes # the corresponding None parameter. See ticket #10888. params = self.connection.ops.modify_insert_params(sql, params) return sql, params def prepare_value(self, field, value): """ Prepare a value to be used in a query by resolving it if it is an expression and otherwise calling the field's get_db_prep_save(). """ if hasattr(value, 'resolve_expression'): value = value.resolve_expression(self.query, allow_joins=False, for_save=True) # Don't allow values containing Col expressions. They refer to # existing columns on a row, but in the case of insert the row # doesn't exist yet. if value.contains_column_references: raise ValueError( 'Failed to insert expression "%s" on %s. F() expressions ' 'can only be used to update, not to insert.' % (value, field) ) if value.contains_aggregate: raise FieldError( 'Aggregate functions are not allowed in this query ' '(%s=%r).' % (field.name, value) ) if value.contains_over_clause: raise FieldError( 'Window expressions are not allowed in this query (%s=%r).' % (field.name, value) ) else: value = field.get_db_prep_save(value, connection=self.connection) return value def pre_save_val(self, field, obj): """ Get the given field's value off the given obj. pre_save() is used for things like auto_now on DateTimeField. Skip it if this is a raw query. """ if self.query.raw: return getattr(obj, field.attname) return field.pre_save(obj, add=True) def assemble_as_sql(self, fields, value_rows): """ Take a sequence of N fields and a sequence of M rows of values, and generate placeholder SQL and parameters for each field and value. 
Return a pair containing: * a sequence of M rows of N SQL placeholder strings, and * a sequence of M rows of corresponding parameter values. Each placeholder string may contain any number of '%s' interpolation strings, and each parameter row will contain exactly as many params as the total number of '%s's in the corresponding placeholder row. """ if not value_rows: return [], [] # list of (sql, [params]) tuples for each object to be saved # Shape: [n_objs][n_fields][2] rows_of_fields_as_sql = ( (self.field_as_sql(field, v) for field, v in zip(fields, row)) for row in value_rows ) # tuple like ([sqls], [[params]s]) for each object to be saved # Shape: [n_objs][2][n_fields] sql_and_param_pair_rows = (zip(*row) for row in rows_of_fields_as_sql) # Extract separate lists for placeholders and params. # Each of these has shape [n_objs][n_fields] placeholder_rows, param_rows = zip(*sql_and_param_pair_rows) # Params for each field are still lists, and need to be flattened. param_rows = [[p for ps in row for p in ps] for row in param_rows] return placeholder_rows, param_rows def as_sql(self): # We don't need quote_name_unless_alias() here, since these are all # going to be column names (so we can avoid the extra overhead). qn = self.connection.ops.quote_name opts = self.query.get_meta() insert_statement = self.connection.ops.insert_statement(ignore_conflicts=self.query.ignore_conflicts) result = ['%s %s' % (insert_statement, qn(opts.db_table))] fields = self.query.fields or [opts.pk] result.append('(%s)' % ', '.join(qn(f.column) for f in fields)) if self.query.fields: value_rows = [ [self.prepare_value(field, self.pre_save_val(field, obj)) for field in fields] for obj in self.query.objs ] else: # An empty object. value_rows = [[self.connection.ops.pk_default_value()] for _ in self.query.objs] fields = [None] # Currently the backends just accept values when generating bulk # queries and generate their own placeholders. Doing that isn't # necessary and it should be possible to use placeholders and # expressions in bulk inserts too. can_bulk = (not self.return_id and self.connection.features.has_bulk_insert) placeholder_rows, param_rows = self.assemble_as_sql(fields, value_rows) ignore_conflicts_suffix_sql = self.connection.ops.ignore_conflicts_suffix_sql( ignore_conflicts=self.query.ignore_conflicts ) if self.return_id and self.connection.features.can_return_columns_from_insert: if self.connection.features.can_return_rows_from_bulk_insert: result.append(self.connection.ops.bulk_insert_sql(fields, placeholder_rows)) params = param_rows else: result.append("VALUES (%s)" % ", ".join(placeholder_rows[0])) params = [param_rows[0]] if ignore_conflicts_suffix_sql: result.append(ignore_conflicts_suffix_sql) col = "%s.%s" % (qn(opts.db_table), qn(opts.pk.column)) r_fmt, r_params = self.connection.ops.return_insert_id(opts.pk) # Skip empty r_fmt to allow subclasses to customize behavior for # 3rd party backends. Refs #19096. 
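            # For example (illustrative): on Oracle, return_insert_id() yields
            # ('RETURNING %s INTO %%s', (InsertVar(field),)), so the statement
            # ends with something like 'RETURNING "TABLE"."ID" INTO %s'.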
if r_fmt: result.append(r_fmt % col) params += [r_params] return [(" ".join(result), tuple(chain.from_iterable(params)))] if can_bulk: result.append(self.connection.ops.bulk_insert_sql(fields, placeholder_rows)) if ignore_conflicts_suffix_sql: result.append(ignore_conflicts_suffix_sql) return [(" ".join(result), tuple(p for ps in param_rows for p in ps))] else: if ignore_conflicts_suffix_sql: result.append(ignore_conflicts_suffix_sql) return [ (" ".join(result + ["VALUES (%s)" % ", ".join(p)]), vals) for p, vals in zip(placeholder_rows, param_rows) ] def execute_sql(self, return_id=False): assert not ( return_id and len(self.query.objs) != 1 and not self.connection.features.can_return_rows_from_bulk_insert ) self.return_id = return_id with self.connection.cursor() as cursor: for sql, params in self.as_sql(): cursor.execute(sql, params) if not return_id: return if self.connection.features.can_return_rows_from_bulk_insert and len(self.query.objs) > 1: return self.connection.ops.fetch_returned_insert_ids(cursor) if self.connection.features.can_return_columns_from_insert: assert len(self.query.objs) == 1 return self.connection.ops.fetch_returned_insert_id(cursor) return self.connection.ops.last_insert_id( cursor, self.query.get_meta().db_table, self.query.get_meta().pk.column ) class SQLDeleteCompiler(SQLCompiler): def as_sql(self): """ Create the SQL for this query. Return the SQL string and list of parameters. """ assert len([t for t in self.query.alias_map if self.query.alias_refcount[t] > 0]) == 1, \ "Can only delete from one table at a time." qn = self.quote_name_unless_alias result = ['DELETE FROM %s' % qn(self.query.base_table)] where, params = self.compile(self.query.where) if where: result.append('WHERE %s' % where) return ' '.join(result), tuple(params) class SQLUpdateCompiler(SQLCompiler): def as_sql(self): """ Create the SQL for this query. Return the SQL string and list of parameters. """ self.pre_sql_setup() if not self.query.values: return '', () qn = self.quote_name_unless_alias values, update_params = [], [] for field, model, val in self.query.values: if hasattr(val, 'resolve_expression'): val = val.resolve_expression(self.query, allow_joins=False, for_save=True) if val.contains_aggregate: raise FieldError( 'Aggregate functions are not allowed in this query ' '(%s=%r).' % (field.name, val) ) if val.contains_over_clause: raise FieldError( 'Window expressions are not allowed in this query ' '(%s=%r).' % (field.name, val) ) elif hasattr(val, 'prepare_database_save'): if field.remote_field: val = field.get_db_prep_save( val.prepare_database_save(field), connection=self.connection, ) else: raise TypeError( "Tried to update field %s with a model instance, %r. " "Use a value compatible with %s." % (field, val, field.__class__.__name__) ) else: val = field.get_db_prep_save(val, connection=self.connection) # Getting the placeholder for the field. 
if hasattr(field, 'get_placeholder'): placeholder = field.get_placeholder(val, self, self.connection) else: placeholder = '%s' name = field.column if hasattr(val, 'as_sql'): sql, params = self.compile(val) values.append('%s = %s' % (qn(name), placeholder % sql)) update_params.extend(params) elif val is not None: values.append('%s = %s' % (qn(name), placeholder)) update_params.append(val) else: values.append('%s = NULL' % qn(name)) table = self.query.base_table result = [ 'UPDATE %s SET' % qn(table), ', '.join(values), ] where, params = self.compile(self.query.where) if where: result.append('WHERE %s' % where) return ' '.join(result), tuple(update_params + params) def execute_sql(self, result_type): """ Execute the specified update. Return the number of rows affected by the primary update query. The "primary update query" is the first non-empty query that is executed. Row counts for any subsequent, related queries are not available. """ cursor = super().execute_sql(result_type) try: rows = cursor.rowcount if cursor else 0 is_empty = cursor is None finally: if cursor: cursor.close() for query in self.query.get_related_updates(): aux_rows = query.get_compiler(self.using).execute_sql(result_type) if is_empty and aux_rows: rows = aux_rows is_empty = False return rows def pre_sql_setup(self): """ If the update depends on results from other tables, munge the "where" conditions to match the format required for (portable) SQL updates. If multiple updates are required, pull out the id values to update at this point so that they don't change as a result of the progressive updates. """ refcounts_before = self.query.alias_refcount.copy() # Ensure base table is in the query self.query.get_initial_alias() count = self.query.count_active_tables() if not self.query.related_updates and count == 1: return query = self.query.chain(klass=Query) query.select_related = False query.clear_ordering(True) query.extra = {} query.select = [] query.add_fields([query.get_meta().pk.name]) super().pre_sql_setup() must_pre_select = count > 1 and not self.connection.features.update_can_self_select # Now we adjust the current query: reset the where clause and get rid # of all the tables we don't need (since they're in the sub-select). self.query.where = self.query.where_class() if self.query.related_updates or must_pre_select: # Either we're using the idents in multiple update queries (so # don't want them to change), or the db backend doesn't support # selecting from the updating table (e.g. MySQL). idents = [] for rows in query.get_compiler(self.using).execute_sql(MULTI): idents.extend(r[0] for r in rows) self.query.add_filter(('pk__in', idents)) self.query.related_ids = idents else: # The fast path. Filters and updates in one query. self.query.add_filter(('pk__in', query)) self.query.reset_refcounts(refcounts_before) class SQLAggregateCompiler(SQLCompiler): def as_sql(self): """ Create the SQL for this query. Return the SQL string and list of parameters. 
""" sql, params = [], [] for annotation in self.query.annotation_select.values(): ann_sql, ann_params = self.compile(annotation) ann_sql, ann_params = annotation.select_format(self, ann_sql, ann_params) sql.append(ann_sql) params.extend(ann_params) self.col_count = len(self.query.annotation_select) sql = ', '.join(sql) params = tuple(params) sql = 'SELECT %s FROM (%s) subquery' % (sql, self.query.subquery) params = params + self.query.sub_params return sql, params def cursor_iter(cursor, sentinel, col_count, itersize): """ Yield blocks of rows from a cursor and ensure the cursor is closed when done. """ try: for rows in iter((lambda: cursor.fetchmany(itersize)), sentinel): yield rows if col_count is None else [r[:col_count] for r in rows] finally: cursor.close()
import datetime import re import uuid from functools import lru_cache from django.conf import settings from django.db.backends.base.operations import BaseDatabaseOperations from django.db.backends.utils import strip_quotes, truncate_name from django.db.utils import DatabaseError from django.utils import timezone from django.utils.encoding import force_bytes, force_str from django.utils.functional import cached_property from .base import Database from .utils import BulkInsertMapper, InsertVar, Oracle_datetime class DatabaseOperations(BaseDatabaseOperations): # Oracle uses NUMBER(11) and NUMBER(19) for integer fields. integer_field_ranges = { 'SmallIntegerField': (-99999999999, 99999999999), 'IntegerField': (-99999999999, 99999999999), 'BigIntegerField': (-9999999999999999999, 9999999999999999999), 'PositiveSmallIntegerField': (0, 99999999999), 'PositiveIntegerField': (0, 99999999999), } set_operators = {**BaseDatabaseOperations.set_operators, 'difference': 'MINUS'} # TODO: colorize this SQL code with style.SQL_KEYWORD(), etc. _sequence_reset_sql = """ DECLARE table_value integer; seq_value integer; seq_name user_tab_identity_cols.sequence_name%%TYPE; BEGIN BEGIN SELECT sequence_name INTO seq_name FROM user_tab_identity_cols WHERE table_name = '%(table_name)s' AND column_name = '%(column_name)s'; EXCEPTION WHEN NO_DATA_FOUND THEN seq_name := '%(no_autofield_sequence_name)s'; END; SELECT NVL(MAX(%(column)s), 0) INTO table_value FROM %(table)s; SELECT NVL(last_number - cache_size, 0) INTO seq_value FROM user_sequences WHERE sequence_name = seq_name; WHILE table_value > seq_value LOOP EXECUTE IMMEDIATE 'SELECT "'||seq_name||'".nextval FROM DUAL' INTO seq_value; END LOOP; END; /""" # Oracle doesn't support string without precision; use the max string size. cast_char_field_without_max_length = 'NVARCHAR2(2000)' cast_data_types = { 'AutoField': 'NUMBER(11)', 'BigAutoField': 'NUMBER(19)', 'SmallAutoField': 'NUMBER(5)', 'TextField': cast_char_field_without_max_length, } def cache_key_culling_sql(self): return 'SELECT cache_key FROM %s ORDER BY cache_key OFFSET %%s ROWS FETCH FIRST 1 ROWS ONLY' def date_extract_sql(self, lookup_type, field_name): if lookup_type == 'week_day': # TO_CHAR(field, 'D') returns an integer from 1-7, where 1=Sunday. return "TO_CHAR(%s, 'D')" % field_name elif lookup_type == 'week': # IW = ISO week number return "TO_CHAR(%s, 'IW')" % field_name elif lookup_type == 'quarter': return "TO_CHAR(%s, 'Q')" % field_name elif lookup_type == 'iso_year': return "TO_CHAR(%s, 'IYYY')" % field_name else: # https://docs.oracle.com/en/database/oracle/oracle-database/18/sqlrf/EXTRACT-datetime.html return "EXTRACT(%s FROM %s)" % (lookup_type.upper(), field_name) def date_trunc_sql(self, lookup_type, field_name): # https://docs.oracle.com/en/database/oracle/oracle-database/18/sqlrf/ROUND-and-TRUNC-Date-Functions.html if lookup_type in ('year', 'month'): return "TRUNC(%s, '%s')" % (field_name, lookup_type.upper()) elif lookup_type == 'quarter': return "TRUNC(%s, 'Q')" % field_name elif lookup_type == 'week': return "TRUNC(%s, 'IW')" % field_name else: return "TRUNC(%s)" % field_name # Oracle crashes with "ORA-03113: end-of-file on communication channel" # if the time zone name is passed in parameter. Use interpolation instead. # https://groups.google.com/forum/#!msg/django-developers/zwQju7hbG78/9l934yelwfsJ # This regexp matches all time zone names from the zoneinfo database. 
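    # For example, it matches plain names such as 'UTC' and 'Europe/Helsinki'
    # as well as offsets like '+05:30', while rejecting anything containing
    # quotes that could escape the interpolated string.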
_tzname_re = re.compile(r'^[\w/:+-]+$') def _prepare_tzname_delta(self, tzname): if '+' in tzname: return tzname[tzname.find('+'):] elif '-' in tzname: return tzname[tzname.find('-'):] return tzname def _convert_field_to_tz(self, field_name, tzname): if not settings.USE_TZ: return field_name if not self._tzname_re.match(tzname): raise ValueError("Invalid time zone name: %s" % tzname) # Convert from connection timezone to the local time, returning # TIMESTAMP WITH TIME ZONE and cast it back to TIMESTAMP to strip the # TIME ZONE details. if self.connection.timezone_name != tzname: return "CAST((FROM_TZ(%s, '%s') AT TIME ZONE '%s') AS TIMESTAMP)" % ( field_name, self.connection.timezone_name, self._prepare_tzname_delta(tzname), ) return field_name def datetime_cast_date_sql(self, field_name, tzname): field_name = self._convert_field_to_tz(field_name, tzname) return 'TRUNC(%s)' % field_name def datetime_cast_time_sql(self, field_name, tzname): # Since `TimeField` values are stored as TIMESTAMP where only the date # part is ignored, convert the field to the specified timezone. return self._convert_field_to_tz(field_name, tzname) def datetime_extract_sql(self, lookup_type, field_name, tzname): field_name = self._convert_field_to_tz(field_name, tzname) return self.date_extract_sql(lookup_type, field_name) def datetime_trunc_sql(self, lookup_type, field_name, tzname): field_name = self._convert_field_to_tz(field_name, tzname) # https://docs.oracle.com/en/database/oracle/oracle-database/18/sqlrf/ROUND-and-TRUNC-Date-Functions.html if lookup_type in ('year', 'month'): sql = "TRUNC(%s, '%s')" % (field_name, lookup_type.upper()) elif lookup_type == 'quarter': sql = "TRUNC(%s, 'Q')" % field_name elif lookup_type == 'week': sql = "TRUNC(%s, 'IW')" % field_name elif lookup_type == 'day': sql = "TRUNC(%s)" % field_name elif lookup_type == 'hour': sql = "TRUNC(%s, 'HH24')" % field_name elif lookup_type == 'minute': sql = "TRUNC(%s, 'MI')" % field_name else: sql = "CAST(%s AS DATE)" % field_name # Cast to DATE removes sub-second precision. return sql def time_trunc_sql(self, lookup_type, field_name): # The implementation is similar to `datetime_trunc_sql` as both # `DateTimeField` and `TimeField` are stored as TIMESTAMP where # the date part of the later is ignored. if lookup_type == 'hour': sql = "TRUNC(%s, 'HH24')" % field_name elif lookup_type == 'minute': sql = "TRUNC(%s, 'MI')" % field_name elif lookup_type == 'second': sql = "CAST(%s AS DATE)" % field_name # Cast to DATE removes sub-second precision. return sql def get_db_converters(self, expression): converters = super().get_db_converters(expression) internal_type = expression.output_field.get_internal_type() if internal_type == 'TextField': converters.append(self.convert_textfield_value) elif internal_type == 'BinaryField': converters.append(self.convert_binaryfield_value) elif internal_type in ['BooleanField', 'NullBooleanField']: converters.append(self.convert_booleanfield_value) elif internal_type == 'DateTimeField': if settings.USE_TZ: converters.append(self.convert_datetimefield_value) elif internal_type == 'DateField': converters.append(self.convert_datefield_value) elif internal_type == 'TimeField': converters.append(self.convert_timefield_value) elif internal_type == 'UUIDField': converters.append(self.convert_uuidfield_value) # Oracle stores empty strings as null. If the field accepts the empty # string, undo this to adhere to the Django convention of using # the empty string instead of null. 
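        # For example (illustrative): a CharField(blank=True) value saved as
        # '' is stored as NULL by Oracle; the converter appended below maps it
        # back to '' when read.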
        if expression.field.empty_strings_allowed:
            converters.append(
                self.convert_empty_bytes
                if internal_type == 'BinaryField' else
                self.convert_empty_string
            )
        return converters

    def convert_textfield_value(self, value, expression, connection):
        if isinstance(value, Database.LOB):
            value = value.read()
        return value

    def convert_binaryfield_value(self, value, expression, connection):
        if isinstance(value, Database.LOB):
            value = force_bytes(value.read())
        return value

    def convert_booleanfield_value(self, value, expression, connection):
        if value in (0, 1):
            value = bool(value)
        return value

    # cx_Oracle always returns datetime.datetime objects for
    # DATE and TIMESTAMP columns, but Django wants to see a
    # Python datetime.date, .time, or .datetime.
    def convert_datetimefield_value(self, value, expression, connection):
        if value is not None:
            value = timezone.make_aware(value, self.connection.timezone)
        return value

    def convert_datefield_value(self, value, expression, connection):
        if isinstance(value, Database.Timestamp):
            value = value.date()
        return value

    def convert_timefield_value(self, value, expression, connection):
        if isinstance(value, Database.Timestamp):
            value = value.time()
        return value

    def convert_uuidfield_value(self, value, expression, connection):
        if value is not None:
            value = uuid.UUID(value)
        return value

    @staticmethod
    def convert_empty_string(value, expression, connection):
        return '' if value is None else value

    @staticmethod
    def convert_empty_bytes(value, expression, connection):
        return b'' if value is None else value

    def deferrable_sql(self):
        return " DEFERRABLE INITIALLY DEFERRED"

    def fetch_returned_insert_id(self, cursor):
        value = cursor._insert_id_var.getvalue()
        if value is None or value == []:
            # cx_Oracle < 6.3 returns None, >= 6.3 returns empty list.
            raise DatabaseError(
                'The database did not return a new row id. Probably "ORA-1403: '
                'no data found" was raised internally but was hidden by the '
                'Oracle OCI library (see https://code.djangoproject.com/ticket/28859).'
            )
        # cx_Oracle < 7 returns value, >= 7 returns list with single value.
        return value[0] if isinstance(value, list) else value

    def field_cast_sql(self, db_type, internal_type):
        if db_type and db_type.endswith('LOB'):
            return "DBMS_LOB.SUBSTR(%s)"
        else:
            return "%s"

    def no_limit_value(self):
        return None

    def limit_offset_sql(self, low_mark, high_mark):
        fetch, offset = self._get_limit_offset_params(low_mark, high_mark)
        return ' '.join(sql for sql in (
            ('OFFSET %d ROWS' % offset) if offset else None,
            ('FETCH FIRST %d ROWS ONLY' % fetch) if fetch else None,
        ) if sql)

    def last_executed_query(self, cursor, sql, params):
        # https://cx-oracle.readthedocs.io/en/latest/cursor.html#Cursor.statement
        # The DB API definition does not define this attribute.
        statement = cursor.statement
        # Unlike Psycopg's `query` and MySQLdb's `_executed`, cx_Oracle's
        # `statement` doesn't contain the query parameters. Substitute
        # parameters manually.
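        # For example (illustrative): 'SELECT ... WHERE id = :arg0' executed
        # with params (42,) is rendered as 'SELECT ... WHERE id = 42'.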
        if isinstance(params, (tuple, list)):
            for i, param in enumerate(params):
                statement = statement.replace(':arg%d' % i, force_str(param, errors='replace'))
        elif isinstance(params, dict):
            for key, param in params.items():
                statement = statement.replace(':%s' % key, force_str(param, errors='replace'))
        return statement

    def last_insert_id(self, cursor, table_name, pk_name):
        sq_name = self._get_sequence_name(cursor, strip_quotes(table_name), pk_name)
        cursor.execute('SELECT "%s".currval FROM dual' % sq_name)
        return cursor.fetchone()[0]

    def lookup_cast(self, lookup_type, internal_type=None):
        if lookup_type in ('iexact', 'icontains', 'istartswith', 'iendswith'):
            return "UPPER(%s)"
        return "%s"

    def max_in_list_size(self):
        return 1000

    def max_name_length(self):
        return 30

    def pk_default_value(self):
        return "NULL"

    def prep_for_iexact_query(self, x):
        return x

    def process_clob(self, value):
        if value is None:
            return ''
        return value.read()

    def quote_name(self, name):
        # SQL92 requires delimited (quoted) names to be case-sensitive. When
        # not quoted, Oracle has case-insensitive behavior for identifiers, but
        # always defaults to uppercase.
        # We simplify things by making Oracle identifiers always uppercase.
        if not name.startswith('"') and not name.endswith('"'):
            name = '"%s"' % truncate_name(name.upper(), self.max_name_length())
        # Oracle puts the query text into a (query % args) construct, so % signs
        # in names need to be escaped. The '%%' will be collapsed back to '%' at
        # that stage so we aren't really making the name longer here.
        name = name.replace('%', '%%')
        return name.upper()

    def random_function_sql(self):
        return "DBMS_RANDOM.RANDOM"

    def regex_lookup(self, lookup_type):
        if lookup_type == 'regex':
            match_option = "'c'"
        else:
            match_option = "'i'"
        return 'REGEXP_LIKE(%%s, %%s, %s)' % match_option

    def return_insert_id(self, field):
        return 'RETURNING %s INTO %%s', (InsertVar(field),)

    def __foreign_key_constraints(self, table_name, recursive):
        with self.connection.cursor() as cursor:
            if recursive:
                cursor.execute("""
                    SELECT user_tables.table_name, rcons.constraint_name
                    FROM user_tables
                    JOIN user_constraints cons
                        ON (user_tables.table_name = cons.table_name AND cons.constraint_type = ANY('P', 'U'))
                    LEFT JOIN user_constraints rcons
                        ON (user_tables.table_name = rcons.table_name AND rcons.constraint_type = 'R')
                    START WITH user_tables.table_name = UPPER(%s)
                    CONNECT BY NOCYCLE PRIOR cons.constraint_name = rcons.r_constraint_name
                    GROUP BY user_tables.table_name, rcons.constraint_name
                    HAVING user_tables.table_name != UPPER(%s)
                    ORDER BY MAX(level) DESC
                """, (table_name, table_name))
            else:
                cursor.execute("""
                    SELECT cons.table_name, cons.constraint_name
                    FROM user_constraints cons
                    WHERE cons.constraint_type = 'R'
                    AND cons.table_name = UPPER(%s)
                """, (table_name,))
            return cursor.fetchall()

    @cached_property
    def _foreign_key_constraints(self):
        # 512 is large enough to fit the ~330 tables (as of this writing) in
        # Django's test suite.
        return lru_cache(maxsize=512)(self.__foreign_key_constraints)

    def sql_flush(self, style, tables, sequences, allow_cascade=False):
        if tables:
            truncated_tables = {table.upper() for table in tables}
            constraints = set()
            # Oracle's TRUNCATE CASCADE only works with ON DELETE CASCADE
            # foreign keys which Django doesn't define. Emulate the
            # PostgreSQL behavior which truncates all dependent tables by
            # manually retrieving all foreign key constraints and resolving
            # dependencies.
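            # In outline: the loop below collects (table, constraint) pairs so
            # the statements that follow can disable each constraint, TRUNCATE
            # every affected table, then re-enable the constraints.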
for table in tables: for foreign_table, constraint in self._foreign_key_constraints(table, recursive=allow_cascade): if allow_cascade: truncated_tables.add(foreign_table) constraints.add((foreign_table, constraint)) sql = [ "%s %s %s %s %s %s %s %s;" % ( style.SQL_KEYWORD('ALTER'), style.SQL_KEYWORD('TABLE'), style.SQL_FIELD(self.quote_name(table)), style.SQL_KEYWORD('DISABLE'), style.SQL_KEYWORD('CONSTRAINT'), style.SQL_FIELD(self.quote_name(constraint)), style.SQL_KEYWORD('KEEP'), style.SQL_KEYWORD('INDEX'), ) for table, constraint in constraints ] + [ "%s %s %s;" % ( style.SQL_KEYWORD('TRUNCATE'), style.SQL_KEYWORD('TABLE'), style.SQL_FIELD(self.quote_name(table)), ) for table in truncated_tables ] + [ "%s %s %s %s %s %s;" % ( style.SQL_KEYWORD('ALTER'), style.SQL_KEYWORD('TABLE'), style.SQL_FIELD(self.quote_name(table)), style.SQL_KEYWORD('ENABLE'), style.SQL_KEYWORD('CONSTRAINT'), style.SQL_FIELD(self.quote_name(constraint)), ) for table, constraint in constraints ] # Since we've just deleted all the rows, running our sequence # ALTER code will reset the sequence to 0. sql.extend(self.sequence_reset_by_name_sql(style, sequences)) return sql else: return [] def sequence_reset_by_name_sql(self, style, sequences): sql = [] for sequence_info in sequences: no_autofield_sequence_name = self._get_no_autofield_sequence_name(sequence_info['table']) table = self.quote_name(sequence_info['table']) column = self.quote_name(sequence_info['column'] or 'id') query = self._sequence_reset_sql % { 'no_autofield_sequence_name': no_autofield_sequence_name, 'table': table, 'column': column, 'table_name': strip_quotes(table), 'column_name': strip_quotes(column), } sql.append(query) return sql def sequence_reset_sql(self, style, model_list): from django.db import models output = [] query = self._sequence_reset_sql for model in model_list: for f in model._meta.local_fields: if isinstance(f, models.AutoField): no_autofield_sequence_name = self._get_no_autofield_sequence_name(model._meta.db_table) table = self.quote_name(model._meta.db_table) column = self.quote_name(f.column) output.append(query % { 'no_autofield_sequence_name': no_autofield_sequence_name, 'table': table, 'column': column, 'table_name': strip_quotes(table), 'column_name': strip_quotes(column), }) # Only one AutoField is allowed per model, so don't # continue to loop break for f in model._meta.many_to_many: if not f.remote_field.through: no_autofield_sequence_name = self._get_no_autofield_sequence_name(f.m2m_db_table()) table = self.quote_name(f.m2m_db_table()) column = self.quote_name('id') output.append(query % { 'no_autofield_sequence_name': no_autofield_sequence_name, 'table': table, 'column': column, 'table_name': strip_quotes(table), 'column_name': 'ID', }) return output def start_transaction_sql(self): return '' def tablespace_sql(self, tablespace, inline=False): if inline: return "USING INDEX TABLESPACE %s" % self.quote_name(tablespace) else: return "TABLESPACE %s" % self.quote_name(tablespace) def adapt_datefield_value(self, value): """ Transform a date value to an object compatible with what is expected by the backend driver for date columns. The default implementation transforms the date to text, but that is not necessary for Oracle. """ return value def adapt_datetimefield_value(self, value): """ Transform a datetime value to an object compatible with what is expected by the backend driver for datetime columns. If naive datetime is passed assumes that is in UTC. 
        Normally, Django's models.DateTimeField ensures that if USE_TZ is
        True, the datetime passed in is timezone-aware.
        """
        if value is None:
            return None

        # Expression values are adapted by the database.
        if hasattr(value, 'resolve_expression'):
            return value

        # cx_Oracle doesn't support tz-aware datetimes
        if timezone.is_aware(value):
            if settings.USE_TZ:
                value = timezone.make_naive(value, self.connection.timezone)
            else:
                raise ValueError("Oracle backend does not support timezone-aware datetimes when USE_TZ is False.")
        return Oracle_datetime.from_datetime(value)

    def adapt_timefield_value(self, value):
        if value is None:
            return None

        # Expression values are adapted by the database.
        if hasattr(value, 'resolve_expression'):
            return value

        if isinstance(value, str):
            return datetime.datetime.strptime(value, '%H:%M:%S')

        # Oracle doesn't support tz-aware times
        if timezone.is_aware(value):
            raise ValueError("Oracle backend does not support timezone-aware times.")

        return Oracle_datetime(1900, 1, 1, value.hour, value.minute, value.second, value.microsecond)

    def combine_expression(self, connector, sub_expressions):
        lhs, rhs = sub_expressions
        if connector == '%%':
            return 'MOD(%s)' % ','.join(sub_expressions)
        elif connector == '&':
            return 'BITAND(%s)' % ','.join(sub_expressions)
        elif connector == '|':
            return 'BITAND(-%(lhs)s-1,%(rhs)s)+%(lhs)s' % {'lhs': lhs, 'rhs': rhs}
        elif connector == '<<':
            return '(%(lhs)s * POWER(2, %(rhs)s))' % {'lhs': lhs, 'rhs': rhs}
        elif connector == '>>':
            return 'FLOOR(%(lhs)s / POWER(2, %(rhs)s))' % {'lhs': lhs, 'rhs': rhs}
        elif connector == '^':
            return 'POWER(%s)' % ','.join(sub_expressions)
        return super().combine_expression(connector, sub_expressions)

    def _get_no_autofield_sequence_name(self, table):
        """
        Manually created sequence name to keep backward compatibility for
        AutoFields that aren't Oracle identity columns.
        """
        name_length = self.max_name_length() - 3
        return '%s_SQ' % truncate_name(strip_quotes(table), name_length).upper()

    def _get_sequence_name(self, cursor, table, pk_name):
        cursor.execute("""
            SELECT sequence_name
            FROM user_tab_identity_cols
            WHERE table_name = UPPER(%s)
            AND column_name = UPPER(%s)""", [table, pk_name])
        row = cursor.fetchone()
        return self._get_no_autofield_sequence_name(table) if row is None else row[0]

    def bulk_insert_sql(self, fields, placeholder_rows):
        query = []
        for row in placeholder_rows:
            select = []
            for i, placeholder in enumerate(row):
                # A model without any fields has fields=[None].
                if fields[i]:
                    internal_type = getattr(fields[i], 'target_field', fields[i]).get_internal_type()
                    placeholder = BulkInsertMapper.types.get(internal_type, '%s') % placeholder
                # Add column aliases to the first select to avoid "ORA-00918:
                # column ambiguously defined" when two or more columns in the
                # first select have the same value.
                if not query:
                    placeholder = '%s col_%s' % (placeholder, i)
                select.append(placeholder)
            query.append('SELECT %s FROM DUAL' % ', '.join(select))
        # Bulk insert to tables with Oracle identity columns causes Oracle to
        # add sequence.nextval to it. Sequence.nextval cannot be used with the
        # UNION operator. To prevent incorrect SQL, move UNION to a subquery.
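        # Illustrative shape of the returned fragment for two rows of two
        # columns:
        #   SELECT * FROM (SELECT %s col_0, %s col_1 FROM DUAL
        #                  UNION ALL SELECT %s, %s FROM DUAL)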
return 'SELECT * FROM (%s)' % ' UNION ALL '.join(query) def subtract_temporals(self, internal_type, lhs, rhs): if internal_type == 'DateField': lhs_sql, lhs_params = lhs rhs_sql, rhs_params = rhs return "NUMTODSINTERVAL(TO_NUMBER(%s - %s), 'DAY')" % (lhs_sql, rhs_sql), lhs_params + rhs_params return super().subtract_temporals(internal_type, lhs, rhs) def bulk_batch_size(self, fields, objs): """Oracle restricts the number of parameters in a query.""" if fields: return self.connection.features.max_query_params // len(fields) return len(objs)
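
# A standalone sanity check (illustrative, not part of Django) of the identity
# combine_expression() above uses to emulate bitwise OR, since Oracle provides
# BITAND but no BITOR: for non-negative integers, -lhs - 1 is the bitwise
# complement of lhs, so BITAND(-lhs - 1, rhs) + lhs adds in exactly the bits
# of rhs that lhs lacks, i.e. lhs | rhs.
def _bitor_via_bitand(lhs, rhs):
    # Mirrors 'BITAND(-%(lhs)s-1,%(rhs)s)+%(lhs)s' in pure Python.
    return ((-lhs - 1) & rhs) + lhs


if __name__ == '__main__':
    assert all(
        _bitor_via_bitand(a, b) == (a | b)
        for a in range(128)
        for b in range(128)
    )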
# django/db/backends/oracle/utils.py
import datetime from .base import Database class InsertVar: """ A late-binding cursor variable that can be passed to Cursor.execute as a parameter, in order to receive the id of the row created by an insert statement. """ types = { 'FloatField': Database.NATIVE_FLOAT, 'CharField': str, 'DateTimeField': Database.TIMESTAMP, 'DateField': Database.DATETIME, 'DecimalField': Database.NUMBER, } def __init__(self, field): internal_type = getattr(field, 'target_field', field).get_internal_type() self.db_type = self.types.get(internal_type, int) def bind_parameter(self, cursor): param = cursor.cursor.var(self.db_type) cursor._insert_id_var = param return param class Oracle_datetime(datetime.datetime): """ A datetime object, with an additional class attribute to tell cx_Oracle to save the microseconds too. """ input_size = Database.TIMESTAMP @classmethod def from_datetime(cls, dt): return Oracle_datetime( dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, dt.microsecond, ) class BulkInsertMapper: BLOB = 'TO_BLOB(%s)' DATE = 'TO_DATE(%s)' INTERVAL = 'CAST(%s as INTERVAL DAY(9) TO SECOND(6))' NUMBER = 'TO_NUMBER(%s)' TIMESTAMP = 'TO_TIMESTAMP(%s)' types = { 'BigIntegerField': NUMBER, 'BinaryField': BLOB, 'BooleanField': NUMBER, 'DateField': DATE, 'DateTimeField': TIMESTAMP, 'DecimalField': NUMBER, 'DurationField': INTERVAL, 'FloatField': NUMBER, 'IntegerField': NUMBER, 'NullBooleanField': NUMBER, 'PositiveIntegerField': NUMBER, 'PositiveSmallIntegerField': NUMBER, 'SmallIntegerField': NUMBER, 'TimeField': TIMESTAMP, }
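
# A minimal sketch (illustrative; assumes cx_Oracle is importable so the
# `Database` import above resolves): Oracle_datetime behaves like a plain
# datetime, but its input_size attribute tells cx_Oracle to bind it as
# TIMESTAMP rather than DATE, so the microseconds survive the round trip.
if __name__ == '__main__':
    dt = datetime.datetime(2020, 5, 17, 10, 30, 0, 123456)
    odt = Oracle_datetime.from_datetime(dt)
    assert odt.microsecond == 123456
    assert odt.input_size is Database.TIMESTAMP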
# django/db/backends/base/features.py
from django.db.utils import ProgrammingError
from django.utils.functional import cached_property


class BaseDatabaseFeatures:
    gis_enabled = False
    allows_group_by_pk = False
    allows_group_by_selected_pks = False
    empty_fetchmany_value = []
    update_can_self_select = True

    # Does the backend distinguish between '' and None?
    interprets_empty_strings_as_nulls = False

    # Does the backend allow inserting duplicate NULL rows in a nullable
    # unique field? All core backends implement this correctly, but other
    # databases such as SQL Server do not.
    supports_nullable_unique_constraints = True

    # Does the backend allow inserting duplicate rows when a unique_together
    # constraint exists and some fields are nullable but not all of them?
    supports_partially_nullable_unique_constraints = True

    can_use_chunked_reads = True
    can_return_columns_from_insert = False
    can_return_rows_from_bulk_insert = False
    has_bulk_insert = True
    uses_savepoints = True
    can_release_savepoints = False

    # If True, don't use integer foreign keys referring to, e.g., positive
    # integer primary keys.
    related_fields_match_type = False
    allow_sliced_subqueries_with_in = True
    has_select_for_update = False
    has_select_for_update_nowait = False
    has_select_for_update_skip_locked = False
    has_select_for_update_of = False
    # Does the database's SELECT FOR UPDATE OF syntax require a column rather
    # than a table?
    select_for_update_of_column = False

    # Does the default test database allow multiple connections?
    # Usually an indication that the test database is in-memory.
    test_db_allows_multiple_connections = True

    # Can an object be saved without an explicit primary key?
    supports_unspecified_pk = False

    # Can a fixture contain forward references? i.e., are FK constraints
    # checked at the end of the transaction, or at the end of each save
    # operation?
    supports_forward_references = True

    # Does the backend truncate names properly when they are too long?
    truncates_names = False

    # Is there a REAL datatype in addition to floats/doubles?
    has_real_datatype = False
    supports_subqueries_in_group_by = True

    # Is there a true datatype for uuid?
    has_native_uuid_field = False

    # Is there a true datatype for timedeltas?
    has_native_duration_field = False

    # Does the database driver support same-type temporal data subtraction
    # by returning the type used to store duration fields?
    supports_temporal_subtraction = False

    # Does the __regex lookup support backreferencing and grouping?
    supports_regex_backreferencing = True

    # Can date/datetime lookups be performed using a string?
    supports_date_lookup_using_string = True

    # Can datetimes with timezones be used?
    supports_timezones = True

    # Does the database have a copy of the zoneinfo database?
    has_zoneinfo_database = True

    # When performing a GROUP BY, is an ORDER BY NULL required
    # to remove any ordering?
    requires_explicit_null_ordering_when_grouping = False

    # Does the backend order NULL values as largest or smallest?
    nulls_order_largest = False

    # The database's limit on the number of query parameters.
    max_query_params = None

    # Can an object have an autoincrement primary key of 0? MySQL says No.
    allows_auto_pk_0 = True

    # Do we need to NULL a ForeignKey out, or can the constraint check be
    # deferred?
    can_defer_constraint_checks = False

    # date_interval_sql can properly handle mixed Date/DateTime fields and
    # timedeltas.
    supports_mixed_date_datetime_comparisons = True

    # Does the backend support tablespaces? Default to False because it isn't
    # in the SQL standard.
    supports_tablespaces = False

    # Does the backend reset sequences between tests?
    supports_sequence_reset = True

    # Can the backend introspect the default value of a column?
    can_introspect_default = True

    # Confirm support for introspected foreign keys. Every database can do
    # this reliably, except MySQL, which can't do it for MyISAM tables.
    can_introspect_foreign_keys = True

    # Can the backend introspect an AutoField, instead of an IntegerField?
    can_introspect_autofield = False

    # Can the backend introspect a BigIntegerField, instead of an IntegerField?
    can_introspect_big_integer_field = True

    # Can the backend introspect a BinaryField, instead of a TextField?
    can_introspect_binary_field = True

    # Can the backend introspect a DecimalField, instead of a FloatField?
    can_introspect_decimal_field = True

    # Can the backend introspect a DurationField, instead of a BigIntegerField?
    can_introspect_duration_field = True

    # Can the backend introspect an IPAddressField, instead of a CharField?
    can_introspect_ip_address_field = False

    # Can the backend introspect a PositiveIntegerField, instead of an IntegerField?
    can_introspect_positive_integer_field = False

    # Can the backend introspect a SmallIntegerField, instead of an IntegerField?
    can_introspect_small_integer_field = False

    # Can the backend introspect a TimeField, instead of a DateTimeField?
    can_introspect_time_field = True

    # Some backends may not be able to differentiate BigAutoField or
    # SmallAutoField from other fields such as AutoField.
    introspected_big_auto_field_type = 'BigAutoField'
    introspected_small_auto_field_type = 'SmallAutoField'

    # Some backends may not be able to differentiate BooleanField from other
    # fields such as IntegerField.
    introspected_boolean_field_type = 'BooleanField'

    # Can the backend introspect the column order (ASC/DESC) for indexes?
    supports_index_column_ordering = True

    # Does the backend support introspection of materialized views?
    can_introspect_materialized_views = False

    # Support for the DISTINCT ON clause.
    can_distinct_on_fields = False

    # Does the backend prevent running SQL queries in broken transactions?
    atomic_transactions = True

    # Can we roll back DDL in a transaction?
    can_rollback_ddl = False

    # Does it support operations that require renaming references in a
    # transaction?
    supports_atomic_references_rename = True

    # Can we issue more than one ALTER COLUMN clause in an ALTER TABLE?
    supports_combined_alters = False

    # Does it support foreign keys?
    supports_foreign_keys = True

    # Can it create foreign key constraints inline when adding columns?
    can_create_inline_fk = True

    # Does it support CHECK constraints?
    supports_column_check_constraints = True
    supports_table_check_constraints = True
    # Does the backend support introspection of CHECK constraints?
    can_introspect_check_constraints = True

    # Does the backend support 'pyformat' style ("... %(name)s ...", {'name': value})
    # parameter passing? Note this can be provided by the backend even if not
    # supported by the Python driver.
    supports_paramstyle_pyformat = True

    # Does the backend require literal defaults, rather than parameterized ones?
    requires_literal_defaults = False

    # Does the backend require a connection reset after each material schema change?
    connection_persists_old_columns = False

    # What kind of error does the backend throw when accessing a closed cursor?
    closed_cursor_error_class = ProgrammingError

    # Does 'a' LIKE 'A' match?
    has_case_insensitive_like = True

    # Suffix for backends that don't support "SELECT xxx;" queries.
bare_select_suffix = '' # If NULL is implied on columns without needing to be explicitly specified implied_column_null = False # Does the backend support "select for update" queries with limit (and offset)? supports_select_for_update_with_limit = True # Does the backend ignore null expressions in GREATEST and LEAST queries unless # every expression is null? greatest_least_ignores_nulls = False # Can the backend clone databases for parallel test execution? # Defaults to False to allow third-party backends to opt-in. can_clone_databases = False # Does the backend consider table names with different casing to # be equal? ignores_table_name_case = False # Place FOR UPDATE right after FROM clause. Used on MSSQL. for_update_after_from = False # Combinatorial flags supports_select_union = True supports_select_intersection = True supports_select_difference = True supports_slicing_ordering_in_compound = False supports_parentheses_in_compound = True # Does the database support SQL 2003 FILTER (WHERE ...) in aggregate # expressions? supports_aggregate_filter_clause = False # Does the backend support indexing a TextField? supports_index_on_text_field = True # Does the backend support window expressions (expression OVER (...))? supports_over_clause = False supports_frame_range_fixed_distance = False # Does the backend support CAST with precision? supports_cast_with_precision = True # How many second decimals does the database return when casting a value to # a type with time? time_cast_precision = 6 # SQL to create a procedure for use by the Django test suite. The # functionality of the procedure isn't important. create_test_procedure_without_params_sql = None create_test_procedure_with_int_param_sql = None # Does the backend support keyword parameters for cursor.callproc()? supports_callproc_kwargs = False # Convert CharField results from bytes to str in database functions. db_functions_convert_bytes_to_str = False # What formats does the backend EXPLAIN syntax support? supported_explain_formats = set() # Does DatabaseOperations.explain_query_prefix() raise ValueError if # unknown kwargs are passed to QuerySet.explain()? validates_explain_options = True # Does the backend support the default parameter in lead() and lag()? supports_default_in_lead_lag = True # Does the backend support ignoring constraint or uniqueness errors during # INSERT? supports_ignore_conflicts = True # Does this backend require casting the results of CASE expressions used # in UPDATE statements to ensure the expression has the correct type? requires_casted_case_in_updates = False # Does the backend support partial indexes (CREATE INDEX ... WHERE ...)? supports_partial_indexes = True supports_functions_in_partial_indexes = True # Does the database allow more than one constraint or index on the same # field(s)? allows_multiple_constraints_on_same_fields = True # Does the backend support boolean expressions in the SELECT clause? 
supports_boolean_expr_in_select_clause = True def __init__(self, connection): self.connection = connection @cached_property def supports_explaining_query_execution(self): """Does this backend support explaining query execution?""" return self.connection.ops.explain_prefix is not None @cached_property def supports_transactions(self): """Confirm support for transactions.""" with self.connection.cursor() as cursor: cursor.execute('CREATE TABLE ROLLBACK_TEST (X INT)') self.connection.set_autocommit(False) cursor.execute('INSERT INTO ROLLBACK_TEST (X) VALUES (8)') self.connection.rollback() self.connection.set_autocommit(True) cursor.execute('SELECT COUNT(X) FROM ROLLBACK_TEST') count, = cursor.fetchone() cursor.execute('DROP TABLE ROLLBACK_TEST') return count == 0
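
# A minimal sketch (the backend class and version attribute here are
# hypothetical) of how a concrete backend customizes these flags: static
# differences are plain class-attribute overrides, while server-dependent
# flags are computed lazily with cached_property, mirroring
# supports_transactions above.
class HypotheticalBackendFeatures(BaseDatabaseFeatures):
    # Static capability: this backend implements window expressions.
    supports_over_clause = True
    max_query_params = 999

    @cached_property
    def supports_ignore_conflicts(self):
        # Hypothetical version gate, evaluated once per connection.
        return self.connection.server_version >= (9, 5)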
# django/db/backends/base/operations.py
import datetime
import decimal
from importlib import import_module

import sqlparse

from django.conf import settings
from django.db import NotSupportedError, transaction
from django.db.backends import utils
from django.utils import timezone
from django.utils.encoding import force_str


class BaseDatabaseOperations:
    """
    Encapsulate backend-specific differences, such as the way a backend
    performs ordering or calculates the ID of a recently-inserted row.
    """
    compiler_module = "django.db.models.sql.compiler"

    # Integer field safe ranges by `internal_type` as documented
    # in docs/ref/models/fields.txt.
    integer_field_ranges = {
        'SmallIntegerField': (-32768, 32767),
        'IntegerField': (-2147483648, 2147483647),
        'BigIntegerField': (-9223372036854775808, 9223372036854775807),
        'PositiveSmallIntegerField': (0, 32767),
        'PositiveIntegerField': (0, 2147483647),
    }
    set_operators = {
        'union': 'UNION',
        'intersection': 'INTERSECT',
        'difference': 'EXCEPT',
    }
    # Mapping of Field.get_internal_type() (typically the model field's class
    # name) to the data type to use for the Cast() function, if different from
    # DatabaseWrapper.data_types.
    cast_data_types = {}
    # CharField data type if the max_length argument isn't provided.
    cast_char_field_without_max_length = None

    # Start and end points for window expressions.
    PRECEDING = 'PRECEDING'
    FOLLOWING = 'FOLLOWING'
    UNBOUNDED_PRECEDING = 'UNBOUNDED ' + PRECEDING
    UNBOUNDED_FOLLOWING = 'UNBOUNDED ' + FOLLOWING
    CURRENT_ROW = 'CURRENT ROW'

    # Prefix for EXPLAIN queries, or None if EXPLAIN isn't supported.
    explain_prefix = None

    def __init__(self, connection):
        self.connection = connection
        self._cache = None

    def autoinc_sql(self, table, column):
        """
        Return any SQL needed to support auto-incrementing primary keys, or
        None if no SQL is necessary.

        This SQL is executed when a table is created.
        """
        return None

    def bulk_batch_size(self, fields, objs):
        """
        Return the maximum allowed batch size for the backend. The fields
        are the fields going to be inserted in the batch, the objs contains
        all the objects to be inserted.
        """
        return len(objs)

    def cache_key_culling_sql(self):
        """
        Return an SQL query that retrieves the first cache key greater than
        the n smallest.

        This is used by the 'db' cache backend to determine where to start
        culling.
        """
        return "SELECT cache_key FROM %s ORDER BY cache_key LIMIT 1 OFFSET %%s"

    def unification_cast_sql(self, output_field):
        """
        Given a field instance, return the SQL that casts the result of a
        union to that type. The resulting string should contain a '%s'
        placeholder for the expression being cast.
        """
        return '%s'

    def date_extract_sql(self, lookup_type, field_name):
        """
        Given a lookup_type of 'year', 'month', or 'day', return the SQL that
        extracts a value from the given date field field_name.
        """
        raise NotImplementedError('subclasses of BaseDatabaseOperations may require a date_extract_sql() method')

    def date_interval_sql(self, timedelta):
        """
        Implement the date interval functionality for expressions.
        """
        raise NotImplementedError('subclasses of BaseDatabaseOperations may require a date_interval_sql() method')

    def date_trunc_sql(self, lookup_type, field_name):
        """
        Given a lookup_type of 'year', 'month', or 'day', return the SQL that
        truncates the given date field field_name to a date object with only
        the given specificity.
        """
        raise NotImplementedError('subclasses of BaseDatabaseOperations may require a date_trunc_sql() method.')

    def datetime_cast_date_sql(self, field_name, tzname):
        """
        Return the SQL to cast a datetime value to date value.
""" raise NotImplementedError( 'subclasses of BaseDatabaseOperations may require a ' 'datetime_cast_date_sql() method.' ) def datetime_cast_time_sql(self, field_name, tzname): """ Return the SQL to cast a datetime value to time value. """ raise NotImplementedError('subclasses of BaseDatabaseOperations may require a datetime_cast_time_sql() method') def datetime_extract_sql(self, lookup_type, field_name, tzname): """ Given a lookup_type of 'year', 'month', 'day', 'hour', 'minute', or 'second', return the SQL that extracts a value from the given datetime field field_name. """ raise NotImplementedError('subclasses of BaseDatabaseOperations may require a datetime_extract_sql() method') def datetime_trunc_sql(self, lookup_type, field_name, tzname): """ Given a lookup_type of 'year', 'month', 'day', 'hour', 'minute', or 'second', return the SQL that truncates the given datetime field field_name to a datetime object with only the given specificity. """ raise NotImplementedError('subclasses of BaseDatabaseOperations may require a datetime_trunc_sql() method') def time_trunc_sql(self, lookup_type, field_name): """ Given a lookup_type of 'hour', 'minute' or 'second', return the SQL that truncates the given time field field_name to a time object with only the given specificity. """ raise NotImplementedError('subclasses of BaseDatabaseOperations may require a time_trunc_sql() method') def time_extract_sql(self, lookup_type, field_name): """ Given a lookup_type of 'hour', 'minute', or 'second', return the SQL that extracts a value from the given time field field_name. """ return self.date_extract_sql(lookup_type, field_name) def deferrable_sql(self): """ Return the SQL to make a constraint "initially deferred" during a CREATE TABLE statement. """ return '' def distinct_sql(self, fields, params): """ Return an SQL DISTINCT clause which removes duplicate rows from the result set. If any fields are given, only check the given fields for duplicates. """ if fields: raise NotSupportedError('DISTINCT ON fields is not supported by this database backend') else: return ['DISTINCT'], [] def fetch_returned_insert_id(self, cursor): """ Given a cursor object that has just performed an INSERT...RETURNING statement into a table that has an auto-incrementing ID, return the newly created ID. """ return cursor.fetchone()[0] def field_cast_sql(self, db_type, internal_type): """ Given a column type (e.g. 'BLOB', 'VARCHAR') and an internal type (e.g. 'GenericIPAddressField'), return the SQL to cast it before using it in a WHERE statement. The resulting string should contain a '%s' placeholder for the column being searched against. """ return '%s' def force_no_ordering(self): """ Return a list used in the "ORDER BY" clause to force no ordering at all. Return an empty list to include nothing in the ordering. """ return [] def for_update_sql(self, nowait=False, skip_locked=False, of=()): """ Return the FOR UPDATE SQL clause to lock rows for an update operation. 
""" return 'FOR UPDATE%s%s%s' % ( ' OF %s' % ', '.join(of) if of else '', ' NOWAIT' if nowait else '', ' SKIP LOCKED' if skip_locked else '', ) def _get_limit_offset_params(self, low_mark, high_mark): offset = low_mark or 0 if high_mark is not None: return (high_mark - offset), offset elif offset: return self.connection.ops.no_limit_value(), offset return None, offset def limit_offset_sql(self, low_mark, high_mark): """Return LIMIT/OFFSET SQL clause.""" limit, offset = self._get_limit_offset_params(low_mark, high_mark) return ' '.join(sql for sql in ( ('LIMIT %d' % limit) if limit else None, ('OFFSET %d' % offset) if offset else None, ) if sql) def last_executed_query(self, cursor, sql, params): """ Return a string of the query last executed by the given cursor, with placeholders replaced with actual values. `sql` is the raw query containing placeholders and `params` is the sequence of parameters. These are used by default, but this method exists for database backends to provide a better implementation according to their own quoting schemes. """ # Convert params to contain string values. def to_string(s): return force_str(s, strings_only=True, errors='replace') if isinstance(params, (list, tuple)): u_params = tuple(to_string(val) for val in params) elif params is None: u_params = () else: u_params = {to_string(k): to_string(v) for k, v in params.items()} return "QUERY = %r - PARAMS = %r" % (sql, u_params) def last_insert_id(self, cursor, table_name, pk_name): """ Given a cursor object that has just performed an INSERT statement into a table that has an auto-incrementing ID, return the newly created ID. `pk_name` is the name of the primary-key column. """ return cursor.lastrowid def lookup_cast(self, lookup_type, internal_type=None): """ Return the string to use in a query when performing lookups ("contains", "like", etc.). It should contain a '%s' placeholder for the column being searched against. """ return "%s" def max_in_list_size(self): """ Return the maximum number of items that can be passed in a single 'IN' list condition, or None if the backend does not impose a limit. """ return None def max_name_length(self): """ Return the maximum length of table and column names, or None if there is no limit. """ return None def no_limit_value(self): """ Return the value to use for the LIMIT when we are wanting "LIMIT infinity". Return None if the limit clause can be omitted in this case. """ raise NotImplementedError('subclasses of BaseDatabaseOperations may require a no_limit_value() method') def pk_default_value(self): """ Return the value to use during an INSERT statement to specify that the field should use its default value. """ return 'DEFAULT' def prepare_sql_script(self, sql): """ Take an SQL script that may contain multiple lines and return a list of statements to feed to successive cursor.execute() calls. Since few databases are able to process raw SQL scripts in a single cursor.execute() call and PEP 249 doesn't talk about this use case, the default implementation is conservative. """ return [ sqlparse.format(statement, strip_comments=True) for statement in sqlparse.split(sql) if statement ] def process_clob(self, value): """ Return the value of a CLOB column, for backends that return a locator object that requires additional processing. """ return value def return_insert_id(self, field): """ For backends that support returning the last insert ID as part of an insert query, return the SQL and params to append to the INSERT query. 
        The returned fragment should contain a format string to hold the
        appropriate column.
        """
        pass

    def compiler(self, compiler_name):
        """
        Return the SQLCompiler class corresponding to the given name,
        in the namespace corresponding to the `compiler_module` attribute
        on this backend.
        """
        if self._cache is None:
            self._cache = import_module(self.compiler_module)
        return getattr(self._cache, compiler_name)

    def quote_name(self, name):
        """
        Return a quoted version of the given table, index, or column name. Do
        not quote the given name if it's already been quoted.
        """
        raise NotImplementedError('subclasses of BaseDatabaseOperations may require a quote_name() method')

    def random_function_sql(self):
        """Return an SQL expression that returns a random value."""
        return 'RANDOM()'

    def regex_lookup(self, lookup_type):
        """
        Return the string to use in a query when performing regular expression
        lookups (using "regex" or "iregex"). It should contain a '%s'
        placeholder for the column being searched against.

        If the feature is not supported (or part of it is not supported),
        raise NotImplementedError.
        """
        raise NotImplementedError('subclasses of BaseDatabaseOperations may require a regex_lookup() method')

    def savepoint_create_sql(self, sid):
        """
        Return the SQL for starting a new savepoint. Only required if the
        "uses_savepoints" feature is True. The "sid" parameter is a string
        for the savepoint id.
        """
        return "SAVEPOINT %s" % self.quote_name(sid)

    def savepoint_commit_sql(self, sid):
        """
        Return the SQL for committing the given savepoint.
        """
        return "RELEASE SAVEPOINT %s" % self.quote_name(sid)

    def savepoint_rollback_sql(self, sid):
        """
        Return the SQL for rolling back the given savepoint.
        """
        return "ROLLBACK TO SAVEPOINT %s" % self.quote_name(sid)

    def set_time_zone_sql(self):
        """
        Return the SQL that will set the connection's time zone.

        Return '' if the backend doesn't support time zones.
        """
        return ''

    def sql_flush(self, style, tables, sequences, allow_cascade=False):
        """
        Return a list of SQL statements required to remove all data from
        the given database tables (without actually removing the tables
        themselves) and the SQL statements required to reset the sequences
        passed in `sequences`.

        The `style` argument is a Style object as returned by either
        color_style() or no_style() in django.core.management.color.

        The `allow_cascade` argument determines whether truncation may cascade
        to tables with foreign keys pointing to the tables being truncated.
        PostgreSQL requires a cascade even if these tables are empty.
        """
        raise NotImplementedError('subclasses of BaseDatabaseOperations must provide a sql_flush() method')

    def execute_sql_flush(self, using, sql_list):
        """Execute a list of SQL statements to flush the database."""
        with transaction.atomic(using=using, savepoint=self.connection.features.can_rollback_ddl):
            with self.connection.cursor() as cursor:
                for sql in sql_list:
                    cursor.execute(sql)

    def sequence_reset_by_name_sql(self, style, sequences):
        """
        Return a list of the SQL statements required to reset sequences
        passed in `sequences`.

        The `style` argument is a Style object as returned by either
        color_style() or no_style() in django.core.management.color.
        """
        return []

    def sequence_reset_sql(self, style, model_list):
        """
        Return a list of the SQL statements required to reset sequences for
        the given models.

        The `style` argument is a Style object as returned by either
        color_style() or no_style() in django.core.management.color.
        """
        return []  # No sequence reset required by default.
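    # For reference (illustrative): management commands such as loaddata
    # obtain these statements through the connection, e.g.
    #   from django.core.management.color import no_style
    #   connection.ops.sequence_reset_sql(no_style(), [SomeModel])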
def start_transaction_sql(self): """Return the SQL statement required to start a transaction.""" return "BEGIN;" def end_transaction_sql(self, success=True): """Return the SQL statement required to end a transaction.""" if not success: return "ROLLBACK;" return "COMMIT;" def tablespace_sql(self, tablespace, inline=False): """ Return the SQL that will be used in a query to define the tablespace. Return '' if the backend doesn't support tablespaces. If `inline` is True, append the SQL to a row; otherwise append it to the entire CREATE TABLE or CREATE INDEX statement. """ return '' def prep_for_like_query(self, x): """Prepare a value for use in a LIKE query.""" return str(x).replace("\\", "\\\\").replace("%", r"\%").replace("_", r"\_") # Same as prep_for_like_query(), but called for "iexact" matches, which # need not necessarily be implemented using "LIKE" in the backend. prep_for_iexact_query = prep_for_like_query def validate_autopk_value(self, value): """ Certain backends do not accept some values for "serial" fields (for example zero in MySQL). Raise a ValueError if the value is invalid, otherwise return the validated value. """ return value def adapt_unknown_value(self, value): """ Transform a value to something compatible with the backend driver. This method only depends on the type of the value. It's designed for cases where the target type isn't known, such as .raw() SQL queries. As a consequence it may not work perfectly in all circumstances. """ if isinstance(value, datetime.datetime): # must be before date return self.adapt_datetimefield_value(value) elif isinstance(value, datetime.date): return self.adapt_datefield_value(value) elif isinstance(value, datetime.time): return self.adapt_timefield_value(value) elif isinstance(value, decimal.Decimal): return self.adapt_decimalfield_value(value) else: return value def adapt_datefield_value(self, value): """ Transform a date value to an object compatible with what is expected by the backend driver for date columns. """ if value is None: return None return str(value) def adapt_datetimefield_value(self, value): """ Transform a datetime value to an object compatible with what is expected by the backend driver for datetime columns. """ if value is None: return None return str(value) def adapt_timefield_value(self, value): """ Transform a time value to an object compatible with what is expected by the backend driver for time columns. """ if value is None: return None if timezone.is_aware(value): raise ValueError("Django does not support timezone-aware times.") return str(value) def adapt_decimalfield_value(self, value, max_digits=None, decimal_places=None): """ Transform a decimal.Decimal value to an object compatible with what is expected by the backend driver for decimal (numeric) columns. """ return utils.format_number(value, max_digits, decimal_places) def adapt_ipaddressfield_value(self, value): """ Transform a string representation of an IP address into the expected type for the backend driver. """ return value or None def year_lookup_bounds_for_date_field(self, value): """ Return a two-elements list with the lower and upper bound to be used with a BETWEEN operator to query a DateField value using a year lookup. `value` is an int, containing the looked-up year. 
""" first = datetime.date(value, 1, 1) second = datetime.date(value, 12, 31) first = self.adapt_datefield_value(first) second = self.adapt_datefield_value(second) return [first, second] def year_lookup_bounds_for_datetime_field(self, value): """ Return a two-elements list with the lower and upper bound to be used with a BETWEEN operator to query a DateTimeField value using a year lookup. `value` is an int, containing the looked-up year. """ first = datetime.datetime(value, 1, 1) second = datetime.datetime(value, 12, 31, 23, 59, 59, 999999) if settings.USE_TZ: tz = timezone.get_current_timezone() first = timezone.make_aware(first, tz) second = timezone.make_aware(second, tz) first = self.adapt_datetimefield_value(first) second = self.adapt_datetimefield_value(second) return [first, second] def get_db_converters(self, expression): """ Return a list of functions needed to convert field data. Some field types on some backends do not provide data in the correct format, this is the hook for converter functions. """ return [] def convert_durationfield_value(self, value, expression, connection): if value is not None: return datetime.timedelta(0, 0, value) def check_expression_support(self, expression): """ Check that the backend supports the provided expression. This is used on specific backends to rule out known expressions that have problematic or nonexistent implementations. If the expression has a known problem, the backend should raise NotSupportedError. """ pass def combine_expression(self, connector, sub_expressions): """ Combine a list of subexpressions into a single expression, using the provided connecting operator. This is required because operators can vary between backends (e.g., Oracle with %% and &) and between subexpression types (e.g., date expressions). """ conn = ' %s ' % connector return conn.join(sub_expressions) def combine_duration_expression(self, connector, sub_expressions): return self.combine_expression(connector, sub_expressions) def binary_placeholder_sql(self, value): """ Some backends require special syntax to insert binary content (MySQL for example uses '_binary %s'). """ return '%s' def modify_insert_params(self, placeholder, params): """ Allow modification of insert parameters. Needed for Oracle Spatial backend due to #10888. """ return params def integer_field_range(self, internal_type): """ Given an integer field internal type (e.g. 'PositiveIntegerField'), return a tuple of the (min_value, max_value) form representing the range of the column type bound to the field. """ return self.integer_field_ranges[internal_type] def subtract_temporals(self, internal_type, lhs, rhs): if self.connection.features.supports_temporal_subtraction: lhs_sql, lhs_params = lhs rhs_sql, rhs_params = rhs return "(%s - %s)" % (lhs_sql, rhs_sql), lhs_params + rhs_params raise NotSupportedError("This backend does not support %s subtraction." % internal_type) def window_frame_start(self, start): if isinstance(start, int): if start < 0: return '%d %s' % (abs(start), self.PRECEDING) elif start == 0: return self.CURRENT_ROW elif start is None: return self.UNBOUNDED_PRECEDING raise ValueError("start argument must be a negative integer, zero, or None, but got '%s'." % start) def window_frame_end(self, end): if isinstance(end, int): if end == 0: return self.CURRENT_ROW elif end > 0: return '%d %s' % (end, self.FOLLOWING) elif end is None: return self.UNBOUNDED_FOLLOWING raise ValueError("end argument must be a positive integer, zero, or None, but got '%s'." 
% end) def window_frame_rows_start_end(self, start=None, end=None): """ Return SQL for start and end points in an OVER clause window frame. """ if not self.connection.features.supports_over_clause: raise NotSupportedError('This backend does not support window expressions.') return self.window_frame_start(start), self.window_frame_end(end) def window_frame_range_start_end(self, start=None, end=None): return self.window_frame_rows_start_end(start, end) def explain_query_prefix(self, format=None, **options): if not self.connection.features.supports_explaining_query_execution: raise NotSupportedError('This backend does not support explaining query execution.') if format: supported_formats = self.connection.features.supported_explain_formats normalized_format = format.upper() if normalized_format not in supported_formats: msg = '%s is not a recognized format.' % normalized_format if supported_formats: msg += ' Allowed formats: %s' % ', '.join(sorted(supported_formats)) raise ValueError(msg) if options: raise ValueError('Unknown options: %s' % ', '.join(sorted(options.keys()))) return self.explain_prefix def insert_statement(self, ignore_conflicts=False): return 'INSERT INTO' def ignore_conflicts_suffix_sql(self, ignore_conflicts=None): return ''
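
# A standalone illustration (plain Python, not Django code) of why
# adapt_unknown_value() above checks datetime.datetime before datetime.date:
# datetime is a subclass of date, so a date check placed first would also
# swallow datetime values and route them to the wrong adapter.
if __name__ == '__main__':
    value = datetime.datetime(2020, 1, 1, 12, 0)
    assert isinstance(value, datetime.date)      # also true for datetimes
    assert isinstance(value, datetime.datetime)  # the specific check must come first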
# django/db/backends/base/schema.py
import logging from datetime import datetime from django.db.backends.ddl_references import ( Columns, ForeignKeyName, IndexName, Statement, Table, ) from django.db.backends.utils import names_digest, split_identifier from django.db.models import Index from django.db.transaction import TransactionManagementError, atomic from django.utils import timezone logger = logging.getLogger('django.db.backends.schema') def _is_relevant_relation(relation, altered_field): """ When altering the given field, must constraints on its model from the given relation be temporarily dropped? """ field = relation.field if field.many_to_many: # M2M reverse field return False if altered_field.primary_key and field.to_fields == [None]: # Foreign key constraint on the primary key, which is being altered. return True # Is the constraint targeting the field being altered? return altered_field.name in field.to_fields def _related_non_m2m_objects(old_field, new_field): # Filter out m2m objects from reverse relations. # Return (old_relation, new_relation) tuples. return zip( (obj for obj in old_field.model._meta.related_objects if _is_relevant_relation(obj, old_field)), (obj for obj in new_field.model._meta.related_objects if _is_relevant_relation(obj, new_field)) ) class BaseDatabaseSchemaEditor: """ This class and its subclasses are responsible for emitting schema-changing statements to the databases - model creation/removal/alteration, field renaming, index fiddling, and so on. """ # Overrideable SQL templates sql_create_table = "CREATE TABLE %(table)s (%(definition)s)" sql_rename_table = "ALTER TABLE %(old_table)s RENAME TO %(new_table)s" sql_retablespace_table = "ALTER TABLE %(table)s SET TABLESPACE %(new_tablespace)s" sql_delete_table = "DROP TABLE %(table)s CASCADE" sql_create_column = "ALTER TABLE %(table)s ADD COLUMN %(column)s %(definition)s" sql_alter_column = "ALTER TABLE %(table)s %(changes)s" sql_alter_column_type = "ALTER COLUMN %(column)s TYPE %(type)s" sql_alter_column_null = "ALTER COLUMN %(column)s DROP NOT NULL" sql_alter_column_not_null = "ALTER COLUMN %(column)s SET NOT NULL" sql_alter_column_default = "ALTER COLUMN %(column)s SET DEFAULT %(default)s" sql_alter_column_no_default = "ALTER COLUMN %(column)s DROP DEFAULT" sql_delete_column = "ALTER TABLE %(table)s DROP COLUMN %(column)s CASCADE" sql_rename_column = "ALTER TABLE %(table)s RENAME COLUMN %(old_column)s TO %(new_column)s" sql_update_with_default = "UPDATE %(table)s SET %(column)s = %(default)s WHERE %(column)s IS NULL" sql_unique_constraint = "UNIQUE (%(columns)s)" sql_check_constraint = "CHECK (%(check)s)" sql_delete_constraint = "ALTER TABLE %(table)s DROP CONSTRAINT %(name)s" sql_constraint = "CONSTRAINT %(name)s %(constraint)s" sql_create_check = "ALTER TABLE %(table)s ADD CONSTRAINT %(name)s CHECK (%(check)s)" sql_delete_check = sql_delete_constraint sql_create_unique = "ALTER TABLE %(table)s ADD CONSTRAINT %(name)s UNIQUE (%(columns)s)" sql_delete_unique = sql_delete_constraint sql_create_fk = ( "ALTER TABLE %(table)s ADD CONSTRAINT %(name)s FOREIGN KEY (%(column)s) " "REFERENCES %(to_table)s (%(to_column)s)%(deferrable)s" ) sql_create_inline_fk = None sql_create_column_inline_fk = None sql_delete_fk = sql_delete_constraint sql_create_index = "CREATE INDEX %(name)s ON %(table)s (%(columns)s)%(extra)s%(condition)s" sql_create_unique_index = "CREATE UNIQUE INDEX %(name)s ON %(table)s (%(columns)s)%(condition)s" sql_delete_index = "DROP INDEX %(name)s" sql_create_pk = "ALTER TABLE %(table)s ADD CONSTRAINT %(name)s PRIMARY KEY 
(%(columns)s)" sql_delete_pk = sql_delete_constraint sql_delete_procedure = 'DROP PROCEDURE %(procedure)s' def __init__(self, connection, collect_sql=False, atomic=True): self.connection = connection self.collect_sql = collect_sql if self.collect_sql: self.collected_sql = [] self.atomic_migration = self.connection.features.can_rollback_ddl and atomic # State-managing methods def __enter__(self): self.deferred_sql = [] if self.atomic_migration: self.atomic = atomic(self.connection.alias) self.atomic.__enter__() return self def __exit__(self, exc_type, exc_value, traceback): if exc_type is None: for sql in self.deferred_sql: self.execute(sql) if self.atomic_migration: self.atomic.__exit__(exc_type, exc_value, traceback) # Core utility functions def execute(self, sql, params=()): """Execute the given SQL statement, with optional parameters.""" # Don't perform the transactional DDL check if SQL is being collected # as it's not going to be executed anyway. if not self.collect_sql and self.connection.in_atomic_block and not self.connection.features.can_rollback_ddl: raise TransactionManagementError( "Executing DDL statements while in a transaction on databases " "that can't perform a rollback is prohibited." ) # Account for non-string statement objects. sql = str(sql) # Log the command we're running, then run it logger.debug("%s; (params %r)", sql, params, extra={'params': params, 'sql': sql}) if self.collect_sql: ending = "" if sql.endswith(";") else ";" if params is not None: self.collected_sql.append((sql % tuple(map(self.quote_value, params))) + ending) else: self.collected_sql.append(sql + ending) else: with self.connection.cursor() as cursor: cursor.execute(sql, params) def quote_name(self, name): return self.connection.ops.quote_name(name) def table_sql(self, model): """Take a model and return its table definition.""" # Add any unique_togethers (always deferred, as some fields might be # created afterwards, like geometry fields with some backends). for fields in model._meta.unique_together: columns = [model._meta.get_field(field).column for field in fields] self.deferred_sql.append(self._create_unique_sql(model, columns)) # Create column SQL, add FK deferreds if needed. column_sqls = [] params = [] for field in model._meta.local_fields: # SQL. definition, extra_params = self.column_sql(model, field) if definition is None: continue # Check constraints can go on the column SQL here. db_params = field.db_parameters(connection=self.connection) if db_params['check']: definition += ' ' + self.sql_check_constraint % db_params # Autoincrement SQL (for backends with inline variant). col_type_suffix = field.db_type_suffix(connection=self.connection) if col_type_suffix: definition += ' %s' % col_type_suffix params.extend(extra_params) # FK. if field.remote_field and field.db_constraint: to_table = field.remote_field.model._meta.db_table to_column = field.remote_field.model._meta.get_field(field.remote_field.field_name).column if self.sql_create_inline_fk: definition += ' ' + self.sql_create_inline_fk % { 'to_table': self.quote_name(to_table), 'to_column': self.quote_name(to_column), } elif self.connection.features.supports_foreign_keys: self.deferred_sql.append(self._create_fk_sql(model, field, '_fk_%(to_table)s_%(to_column)s')) # Add the SQL to our big list. column_sqls.append('%s %s' % ( self.quote_name(field.column), definition, )) # Autoincrement SQL (for backends with post table definition # variant). 
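            # Backends that need separate statements (e.g. a sequence plus a
            # trigger) return them from autoinc_sql(); the base implementation
            # returns None, in which case nothing is deferred here.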
if field.get_internal_type() in ('AutoField', 'BigAutoField', 'SmallAutoField'): autoinc_sql = self.connection.ops.autoinc_sql(model._meta.db_table, field.column) if autoinc_sql: self.deferred_sql.extend(autoinc_sql) constraints = [constraint.constraint_sql(model, self) for constraint in model._meta.constraints] sql = self.sql_create_table % { 'table': self.quote_name(model._meta.db_table), 'definition': ', '.join(constraint for constraint in (*column_sqls, *constraints) if constraint), } if model._meta.db_tablespace: tablespace_sql = self.connection.ops.tablespace_sql(model._meta.db_tablespace) if tablespace_sql: sql += ' ' + tablespace_sql return sql, params # Field <-> database mapping functions def column_sql(self, model, field, include_default=False): """ Take a field and return its column definition. The field must already have had set_attributes_from_name() called. """ # Get the column's type and use that as the basis of the SQL db_params = field.db_parameters(connection=self.connection) sql = db_params['type'] params = [] # Check for fields that aren't actually columns (e.g. M2M) if sql is None: return None, None # Work out nullability null = field.null # If we were told to include a default value, do so include_default = include_default and not self.skip_default(field) if include_default: default_value = self.effective_default(field) if default_value is not None: if self.connection.features.requires_literal_defaults: # Some databases can't take defaults as a parameter (oracle) # If this is the case, the individual schema backend should # implement prepare_default sql += " DEFAULT %s" % self.prepare_default(default_value) else: sql += " DEFAULT %s" params += [default_value] # Oracle treats the empty string ('') as null, so coerce the null # option whenever '' is a possible value. if (field.empty_strings_allowed and not field.primary_key and self.connection.features.interprets_empty_strings_as_nulls): null = True if null and not self.connection.features.implied_column_null: sql += " NULL" elif not null: sql += " NOT NULL" # Primary key/unique outputs if field.primary_key: sql += " PRIMARY KEY" elif field.unique: sql += " UNIQUE" # Optionally add the tablespace if it's an implicitly indexed column tablespace = field.db_tablespace or model._meta.db_tablespace if tablespace and self.connection.features.supports_tablespaces and field.unique: sql += " %s" % self.connection.ops.tablespace_sql(tablespace, inline=True) # Return the sql return sql, params def skip_default(self, field): """ Some backends don't accept default values for certain columns types (i.e. MySQL longtext and longblob). """ return False def prepare_default(self, value): """ Only used for backends which have requires_literal_defaults feature """ raise NotImplementedError( 'subclasses of BaseDatabaseSchemaEditor for backends which have ' 'requires_literal_defaults must provide a prepare_default() method' ) @staticmethod def _effective_default(field): # This method allows testing its logic without a connection. 
if field.has_default(): default = field.get_default() elif not field.null and field.blank and field.empty_strings_allowed: if field.get_internal_type() == "BinaryField": default = bytes() else: default = str() elif getattr(field, 'auto_now', False) or getattr(field, 'auto_now_add', False): default = datetime.now() internal_type = field.get_internal_type() if internal_type == 'DateField': default = default.date() elif internal_type == 'TimeField': default = default.time() elif internal_type == 'DateTimeField': default = timezone.now() else: default = None return default def effective_default(self, field): """Return a field's effective database default value.""" return field.get_db_prep_save(self._effective_default(field), self.connection) def quote_value(self, value): """ Return a quoted version of the value so it's safe to use in an SQL string. This is not safe against injection from user code; it is intended only for use in making SQL scripts or preparing default values for particularly tricky backends (defaults are not user-defined, though, so this is safe). """ raise NotImplementedError() # Actions def create_model(self, model): """ Create a table and any accompanying indexes or unique constraints for the given `model`. """ sql, params = self.table_sql(model) # Prevent using [] as params, in the case a literal '%' is used in the definition self.execute(sql, params or None) # Add any field index and index_together's (deferred as SQLite _remake_table needs it) self.deferred_sql.extend(self._model_indexes_sql(model)) # Make M2M tables for field in model._meta.local_many_to_many: if field.remote_field.through._meta.auto_created: self.create_model(field.remote_field.through) def delete_model(self, model): """Delete a model from the database.""" # Handle auto-created intermediary models for field in model._meta.local_many_to_many: if field.remote_field.through._meta.auto_created: self.delete_model(field.remote_field.through) # Delete the table self.execute(self.sql_delete_table % { "table": self.quote_name(model._meta.db_table), }) # Remove all deferred statements referencing the deleted table. for sql in list(self.deferred_sql): if isinstance(sql, Statement) and sql.references_table(model._meta.db_table): self.deferred_sql.remove(sql) def add_index(self, model, index): """Add an index on a model.""" self.execute(index.create_sql(model, self), params=None) def remove_index(self, model, index): """Remove an index from a model.""" self.execute(index.remove_sql(model, self)) def add_constraint(self, model, constraint): """Add a constraint to a model.""" sql = constraint.create_sql(model, self) if sql: self.execute(sql) def remove_constraint(self, model, constraint): """Remove a constraint from a model.""" sql = constraint.remove_sql(model, self) if sql: self.execute(sql) def alter_unique_together(self, model, old_unique_together, new_unique_together): """ Deal with a model changing its unique_together. The input unique_togethers must be doubly-nested, not the single-nested ["foo", "bar"] format. 
""" olds = {tuple(fields) for fields in old_unique_together} news = {tuple(fields) for fields in new_unique_together} # Deleted uniques for fields in olds.difference(news): self._delete_composed_index(model, fields, {'unique': True}, self.sql_delete_unique) # Created uniques for fields in news.difference(olds): columns = [model._meta.get_field(field).column for field in fields] self.execute(self._create_unique_sql(model, columns)) def alter_index_together(self, model, old_index_together, new_index_together): """ Deal with a model changing its index_together. The input index_togethers must be doubly-nested, not the single-nested ["foo", "bar"] format. """ olds = {tuple(fields) for fields in old_index_together} news = {tuple(fields) for fields in new_index_together} # Deleted indexes for fields in olds.difference(news): self._delete_composed_index(model, fields, {'index': True}, self.sql_delete_index) # Created indexes for field_names in news.difference(olds): fields = [model._meta.get_field(field) for field in field_names] self.execute(self._create_index_sql(model, fields, suffix="_idx")) def _delete_composed_index(self, model, fields, constraint_kwargs, sql): meta_constraint_names = {constraint.name for constraint in model._meta.constraints} meta_index_names = {constraint.name for constraint in model._meta.indexes} columns = [model._meta.get_field(field).column for field in fields] constraint_names = self._constraint_names( model, columns, exclude=meta_constraint_names | meta_index_names, **constraint_kwargs ) if len(constraint_names) != 1: raise ValueError("Found wrong number (%s) of constraints for %s(%s)" % ( len(constraint_names), model._meta.db_table, ", ".join(columns), )) self.execute(self._delete_constraint_sql(sql, model, constraint_names[0])) def alter_db_table(self, model, old_db_table, new_db_table): """Rename the table a model points to.""" if (old_db_table == new_db_table or (self.connection.features.ignores_table_name_case and old_db_table.lower() == new_db_table.lower())): return self.execute(self.sql_rename_table % { "old_table": self.quote_name(old_db_table), "new_table": self.quote_name(new_db_table), }) # Rename all references to the old table name. for sql in self.deferred_sql: if isinstance(sql, Statement): sql.rename_table_references(old_db_table, new_db_table) def alter_db_tablespace(self, model, old_db_tablespace, new_db_tablespace): """Move a model's table between tablespaces.""" self.execute(self.sql_retablespace_table % { "table": self.quote_name(model._meta.db_table), "old_tablespace": self.quote_name(old_db_tablespace), "new_tablespace": self.quote_name(new_db_tablespace), }) def add_field(self, model, field): """ Create a field on a model. Usually involves adding a column, but may involve adding a table instead (for M2M fields). """ # Special-case implicit M2M tables if field.many_to_many and field.remote_field.through._meta.auto_created: return self.create_model(field.remote_field.through) # Get the column's definition definition, params = self.column_sql(model, field, include_default=True) # It might not actually have a column behind it if definition is None: return # Check constraints can go on the column SQL here db_params = field.db_parameters(connection=self.connection) if db_params['check']: definition += " " + self.sql_check_constraint % db_params if field.remote_field and self.connection.features.supports_foreign_keys and field.db_constraint: constraint_suffix = '_fk_%(to_table)s_%(to_column)s' # Add FK constraint inline, if supported. 
if self.sql_create_column_inline_fk: to_table = field.remote_field.model._meta.db_table to_column = field.remote_field.model._meta.get_field(field.remote_field.field_name).column definition += " " + self.sql_create_column_inline_fk % { 'name': self._fk_constraint_name(model, field, constraint_suffix), 'column': self.quote_name(field.column), 'to_table': self.quote_name(to_table), 'to_column': self.quote_name(to_column), 'deferrable': self.connection.ops.deferrable_sql() } # Otherwise, add FK constraints later. else: self.deferred_sql.append(self._create_fk_sql(model, field, constraint_suffix)) # Build the SQL and run it sql = self.sql_create_column % { "table": self.quote_name(model._meta.db_table), "column": self.quote_name(field.column), "definition": definition, } self.execute(sql, params) # Drop the default if we need to # (Django usually does not use in-database defaults) if not self.skip_default(field) and self.effective_default(field) is not None: changes_sql, params = self._alter_column_default_sql(model, None, field, drop=True) sql = self.sql_alter_column % { "table": self.quote_name(model._meta.db_table), "changes": changes_sql, } self.execute(sql, params) # Add an index, if required self.deferred_sql.extend(self._field_indexes_sql(model, field)) # Reset connection if required if self.connection.features.connection_persists_old_columns: self.connection.close() def remove_field(self, model, field): """ Remove a field from a model. Usually involves deleting a column, but for M2Ms may involve deleting a table. """ # Special-case implicit M2M tables if field.many_to_many and field.remote_field.through._meta.auto_created: return self.delete_model(field.remote_field.through) # It might not actually have a column behind it if field.db_parameters(connection=self.connection)['type'] is None: return # Drop any FK constraints, MySQL requires explicit deletion if field.remote_field: fk_names = self._constraint_names(model, [field.column], foreign_key=True) for fk_name in fk_names: self.execute(self._delete_fk_sql(model, fk_name)) # Delete the column sql = self.sql_delete_column % { "table": self.quote_name(model._meta.db_table), "column": self.quote_name(field.column), } self.execute(sql) # Reset connection if required if self.connection.features.connection_persists_old_columns: self.connection.close() # Remove all deferred statements referencing the deleted column. for sql in list(self.deferred_sql): if isinstance(sql, Statement) and sql.references_column(model._meta.db_table, field.column): self.deferred_sql.remove(sql) def alter_field(self, model, old_field, new_field, strict=False): """ Allow a field's type, uniqueness, nullability, default, column, constraints, etc. to be modified. `old_field` is required to compute the necessary changes. If `strict` is True, raise errors if the old column does not match `old_field` precisely. 
""" # Ensure this field is even column-based old_db_params = old_field.db_parameters(connection=self.connection) old_type = old_db_params['type'] new_db_params = new_field.db_parameters(connection=self.connection) new_type = new_db_params['type'] if ((old_type is None and old_field.remote_field is None) or (new_type is None and new_field.remote_field is None)): raise ValueError( "Cannot alter field %s into %s - they do not properly define " "db_type (are you using a badly-written custom field?)" % (old_field, new_field), ) elif old_type is None and new_type is None and ( old_field.remote_field.through and new_field.remote_field.through and old_field.remote_field.through._meta.auto_created and new_field.remote_field.through._meta.auto_created): return self._alter_many_to_many(model, old_field, new_field, strict) elif old_type is None and new_type is None and ( old_field.remote_field.through and new_field.remote_field.through and not old_field.remote_field.through._meta.auto_created and not new_field.remote_field.through._meta.auto_created): # Both sides have through models; this is a no-op. return elif old_type is None or new_type is None: raise ValueError( "Cannot alter field %s into %s - they are not compatible types " "(you cannot alter to or from M2M fields, or add or remove " "through= on M2M fields)" % (old_field, new_field) ) self._alter_field(model, old_field, new_field, old_type, new_type, old_db_params, new_db_params, strict) def _alter_field(self, model, old_field, new_field, old_type, new_type, old_db_params, new_db_params, strict=False): """Perform a "physical" (non-ManyToMany) field update.""" # Drop any FK constraints, we'll remake them later fks_dropped = set() if old_field.remote_field and old_field.db_constraint: fk_names = self._constraint_names(model, [old_field.column], foreign_key=True) if strict and len(fk_names) != 1: raise ValueError("Found wrong number (%s) of foreign key constraints for %s.%s" % ( len(fk_names), model._meta.db_table, old_field.column, )) for fk_name in fk_names: fks_dropped.add((old_field.column,)) self.execute(self._delete_fk_sql(model, fk_name)) # Has unique been removed? if old_field.unique and (not new_field.unique or self._field_became_primary_key(old_field, new_field)): # Find the unique constraint for this field meta_constraint_names = {constraint.name for constraint in model._meta.constraints} constraint_names = self._constraint_names( model, [old_field.column], unique=True, primary_key=False, exclude=meta_constraint_names, ) if strict and len(constraint_names) != 1: raise ValueError("Found wrong number (%s) of unique constraints for %s.%s" % ( len(constraint_names), model._meta.db_table, old_field.column, )) for constraint_name in constraint_names: self.execute(self._delete_unique_sql(model, constraint_name)) # Drop incoming FK constraints if the field is a primary key or unique, # which might be a to_field target, and things are going to change. drop_foreign_keys = ( ( (old_field.primary_key and new_field.primary_key) or (old_field.unique and new_field.unique) ) and old_type != new_type ) if drop_foreign_keys: # '_meta.related_field' also contains M2M reverse fields, these # will be filtered out for _old_rel, new_rel in _related_non_m2m_objects(old_field, new_field): rel_fk_names = self._constraint_names( new_rel.related_model, [new_rel.field.column], foreign_key=True ) for fk_name in rel_fk_names: self.execute(self._delete_fk_sql(new_rel.related_model, fk_name)) # Removed an index? 
(no strict check, as multiple indexes are possible) # Remove indexes if db_index switched to False or a unique constraint # will now be used in lieu of an index. The following lines from the # truth table show all True cases; the rest are False: # # old_field.db_index | old_field.unique | new_field.db_index | new_field.unique # ------------------------------------------------------------------------------ # True | False | False | False # True | False | False | True # True | False | True | True if old_field.db_index and not old_field.unique and (not new_field.db_index or new_field.unique): # Find the index for this field meta_index_names = {index.name for index in model._meta.indexes} # Retrieve only BTREE indexes since this is what's created with # db_index=True. index_names = self._constraint_names( model, [old_field.column], index=True, type_=Index.suffix, exclude=meta_index_names, ) for index_name in index_names: # The only way to check if an index was created with # db_index=True or with Index(['field'], name='foo') # is to look at its name (refs #28053). self.execute(self._delete_index_sql(model, index_name)) # Change check constraints? if old_db_params['check'] != new_db_params['check'] and old_db_params['check']: meta_constraint_names = {constraint.name for constraint in model._meta.constraints} constraint_names = self._constraint_names( model, [old_field.column], check=True, exclude=meta_constraint_names, ) if strict and len(constraint_names) != 1: raise ValueError("Found wrong number (%s) of check constraints for %s.%s" % ( len(constraint_names), model._meta.db_table, old_field.column, )) for constraint_name in constraint_names: self.execute(self._delete_check_sql(model, constraint_name)) # Have they renamed the column? if old_field.column != new_field.column: self.execute(self._rename_field_sql(model._meta.db_table, old_field, new_field, new_type)) # Rename all references to the renamed column. for sql in self.deferred_sql: if isinstance(sql, Statement): sql.rename_column_references(model._meta.db_table, old_field.column, new_field.column) # Next, start accumulating actions to do actions = [] null_actions = [] post_actions = [] # Type change? if old_type != new_type: fragment, other_actions = self._alter_column_type_sql(model, old_field, new_field, new_type) actions.append(fragment) post_actions.extend(other_actions) # When changing a column NULL constraint to NOT NULL with a given # default value, we need to perform 4 steps: # 1. Add a default for new incoming writes # 2. Update existing NULL rows with new default # 3. Replace NULL constraint with NOT NULL # 4. Drop the default again. # Default change? old_default = self.effective_default(old_field) new_default = self.effective_default(new_field) needs_database_default = ( old_field.null and not new_field.null and old_default != new_default and new_default is not None and not self.skip_default(new_field) ) if needs_database_default: actions.append(self._alter_column_default_sql(model, old_field, new_field)) # Nullability change? 
if old_field.null != new_field.null: fragment = self._alter_column_null_sql(model, old_field, new_field) if fragment: null_actions.append(fragment) # Only if we have a default and there is a change from NULL to NOT NULL four_way_default_alteration = ( new_field.has_default() and (old_field.null and not new_field.null) ) if actions or null_actions: if not four_way_default_alteration: # If we don't have to do a 4-way default alteration we can # directly run a (NOT) NULL alteration actions = actions + null_actions # Combine actions together if we can (e.g. postgres) if self.connection.features.supports_combined_alters and actions: sql, params = tuple(zip(*actions)) actions = [(", ".join(sql), sum(params, []))] # Apply those actions for sql, params in actions: self.execute( self.sql_alter_column % { "table": self.quote_name(model._meta.db_table), "changes": sql, }, params, ) if four_way_default_alteration: # Update existing rows with default value self.execute( self.sql_update_with_default % { "table": self.quote_name(model._meta.db_table), "column": self.quote_name(new_field.column), "default": "%s", }, [new_default], ) # Since we didn't run a NOT NULL change before we need to do it # now for sql, params in null_actions: self.execute( self.sql_alter_column % { "table": self.quote_name(model._meta.db_table), "changes": sql, }, params, ) if post_actions: for sql, params in post_actions: self.execute(sql, params) # If primary_key changed to False, delete the primary key constraint. if old_field.primary_key and not new_field.primary_key: self._delete_primary_key(model, strict) # Added a unique? if self._unique_should_be_added(old_field, new_field): self.execute(self._create_unique_sql(model, [new_field.column])) # Added an index? Add an index if db_index switched to True or a unique # constraint will no longer be used in lieu of an index. The following # lines from the truth table show all True cases; the rest are False: # # old_field.db_index | old_field.unique | new_field.db_index | new_field.unique # ------------------------------------------------------------------------------ # False | False | True | False # False | True | True | False # True | True | True | False if (not old_field.db_index or old_field.unique) and new_field.db_index and not new_field.unique: self.execute(self._create_index_sql(model, [new_field])) # Type alteration on primary key? Then we need to alter the column # referring to us. rels_to_update = [] if old_field.primary_key and new_field.primary_key and old_type != new_type: rels_to_update.extend(_related_non_m2m_objects(old_field, new_field)) # Changed to become primary key? if self._field_became_primary_key(old_field, new_field): # Make the new one self.execute(self._create_primary_key_sql(model, new_field)) # Update all referencing columns rels_to_update.extend(_related_non_m2m_objects(old_field, new_field)) # Handle our type alters on the other end of rels from the PK stuff above for old_rel, new_rel in rels_to_update: rel_db_params = new_rel.field.db_parameters(connection=self.connection) rel_type = rel_db_params['type'] fragment, other_actions = self._alter_column_type_sql( new_rel.related_model, old_rel.field, new_rel.field, rel_type ) self.execute( self.sql_alter_column % { "table": self.quote_name(new_rel.related_model._meta.db_table), "changes": fragment[0], }, fragment[1], ) for sql, params in other_actions: self.execute(sql, params) # Does it have a foreign key? 
if (new_field.remote_field and (fks_dropped or not old_field.remote_field or not old_field.db_constraint) and new_field.db_constraint): self.execute(self._create_fk_sql(model, new_field, "_fk_%(to_table)s_%(to_column)s")) # Rebuild FKs that pointed to us if we previously had to drop them if drop_foreign_keys: for rel in new_field.model._meta.related_objects: if _is_relevant_relation(rel, new_field) and rel.field.db_constraint: self.execute(self._create_fk_sql(rel.related_model, rel.field, "_fk")) # Does it have check constraints we need to add? if old_db_params['check'] != new_db_params['check'] and new_db_params['check']: constraint_name = self._create_index_name(model._meta.db_table, [new_field.column], suffix='_check') self.execute(self._create_check_sql(model, constraint_name, new_db_params['check'])) # Drop the default if we need to # (Django usually does not use in-database defaults) if needs_database_default: changes_sql, params = self._alter_column_default_sql(model, old_field, new_field, drop=True) sql = self.sql_alter_column % { "table": self.quote_name(model._meta.db_table), "changes": changes_sql, } self.execute(sql, params) # Reset connection if required if self.connection.features.connection_persists_old_columns: self.connection.close() def _alter_column_null_sql(self, model, old_field, new_field): """ Hook to specialize column null alteration. Return a (sql, params) fragment to set a column to null or non-null as required by new_field, or None if no changes are required. """ if (self.connection.features.interprets_empty_strings_as_nulls and new_field.get_internal_type() in ("CharField", "TextField")): # The field is nullable in the database anyway, leave it alone. return else: new_db_params = new_field.db_parameters(connection=self.connection) sql = self.sql_alter_column_null if new_field.null else self.sql_alter_column_not_null return ( sql % { 'column': self.quote_name(new_field.column), 'type': new_db_params['type'], }, [], ) def _alter_column_default_sql(self, model, old_field, new_field, drop=False): """ Hook to specialize column default alteration. Return a (sql, params) fragment to add or drop (depending on the drop argument) a default to new_field's column. """ new_default = self.effective_default(new_field) default = '%s' params = [new_default] if drop: params = [] elif self.connection.features.requires_literal_defaults: # Some databases (Oracle) can't take defaults as a parameter # If this is the case, the SchemaEditor for that database should # implement prepare_default(). default = self.prepare_default(new_default) params = [] new_db_params = new_field.db_parameters(connection=self.connection) sql = self.sql_alter_column_no_default if drop else self.sql_alter_column_default return ( sql % { 'column': self.quote_name(new_field.column), 'type': new_db_params['type'], 'default': default, }, params, ) def _alter_column_type_sql(self, model, old_field, new_field, new_type): """ Hook to specialize column type alteration for different backends, for cases when a creation type is different to an alteration type (e.g. SERIAL in PostgreSQL, PostGIS fields). Return a two-tuple of: an SQL fragment of (sql, params) to insert into an ALTER TABLE statement and a list of extra (sql, params) tuples to run once the field is altered. 
""" return ( ( self.sql_alter_column_type % { "column": self.quote_name(new_field.column), "type": new_type, }, [], ), [], ) def _alter_many_to_many(self, model, old_field, new_field, strict): """Alter M2Ms to repoint their to= endpoints.""" # Rename the through table if old_field.remote_field.through._meta.db_table != new_field.remote_field.through._meta.db_table: self.alter_db_table(old_field.remote_field.through, old_field.remote_field.through._meta.db_table, new_field.remote_field.through._meta.db_table) # Repoint the FK to the other side self.alter_field( new_field.remote_field.through, # We need the field that points to the target model, so we can tell alter_field to change it - # this is m2m_reverse_field_name() (as opposed to m2m_field_name, which points to our model) old_field.remote_field.through._meta.get_field(old_field.m2m_reverse_field_name()), new_field.remote_field.through._meta.get_field(new_field.m2m_reverse_field_name()), ) self.alter_field( new_field.remote_field.through, # for self-referential models we need to alter field from the other end too old_field.remote_field.through._meta.get_field(old_field.m2m_field_name()), new_field.remote_field.through._meta.get_field(new_field.m2m_field_name()), ) def _create_index_name(self, table_name, column_names, suffix=""): """ Generate a unique name for an index/unique constraint. The name is divided into 3 parts: the table name, the column names, and a unique digest and suffix. """ _, table_name = split_identifier(table_name) hash_suffix_part = '%s%s' % (names_digest(table_name, *column_names, length=8), suffix) max_length = self.connection.ops.max_name_length() or 200 # If everything fits into max_length, use that name. index_name = '%s_%s_%s' % (table_name, '_'.join(column_names), hash_suffix_part) if len(index_name) <= max_length: return index_name # Shorten a long suffix. if len(hash_suffix_part) > max_length / 3: hash_suffix_part = hash_suffix_part[:max_length // 3] other_length = (max_length - len(hash_suffix_part)) // 2 - 1 index_name = '%s_%s_%s' % ( table_name[:other_length], '_'.join(column_names)[:other_length], hash_suffix_part, ) # Prepend D if needed to prevent the name from starting with an # underscore or a number (not permitted on Oracle). if index_name[0] == "_" or index_name[0].isdigit(): index_name = "D%s" % index_name[:-1] return index_name def _get_index_tablespace_sql(self, model, fields, db_tablespace=None): if db_tablespace is None: if len(fields) == 1 and fields[0].db_tablespace: db_tablespace = fields[0].db_tablespace elif model._meta.db_tablespace: db_tablespace = model._meta.db_tablespace if db_tablespace is not None: return ' ' + self.connection.ops.tablespace_sql(db_tablespace) return '' def _create_index_sql(self, model, fields, *, name=None, suffix='', using='', db_tablespace=None, col_suffixes=(), sql=None, opclasses=(), condition=None): """ Return the SQL statement to create the index for one or several fields. `sql` can be specified if the syntax differs from the standard (GIS indexes, ...). 
""" tablespace_sql = self._get_index_tablespace_sql(model, fields, db_tablespace=db_tablespace) columns = [field.column for field in fields] sql_create_index = sql or self.sql_create_index table = model._meta.db_table def create_index_name(*args, **kwargs): nonlocal name if name is None: name = self._create_index_name(*args, **kwargs) return self.quote_name(name) return Statement( sql_create_index, table=Table(table, self.quote_name), name=IndexName(table, columns, suffix, create_index_name), using=using, columns=self._index_columns(table, columns, col_suffixes, opclasses), extra=tablespace_sql, condition=(' WHERE ' + condition) if condition else '', ) def _delete_index_sql(self, model, name): return Statement( self.sql_delete_index, table=Table(model._meta.db_table, self.quote_name), name=self.quote_name(name), ) def _index_columns(self, table, columns, col_suffixes, opclasses): return Columns(table, columns, self.quote_name, col_suffixes=col_suffixes) def _model_indexes_sql(self, model): """ Return a list of all index SQL statements (field indexes, index_together, Meta.indexes) for the specified model. """ if not model._meta.managed or model._meta.proxy or model._meta.swapped: return [] output = [] for field in model._meta.local_fields: output.extend(self._field_indexes_sql(model, field)) for field_names in model._meta.index_together: fields = [model._meta.get_field(field) for field in field_names] output.append(self._create_index_sql(model, fields, suffix="_idx")) for index in model._meta.indexes: output.append(index.create_sql(model, self)) return output def _field_indexes_sql(self, model, field): """ Return a list of all index SQL statements for the specified field. """ output = [] if self._field_should_be_indexed(model, field): output.append(self._create_index_sql(model, [field])) return output def _field_should_be_indexed(self, model, field): return field.db_index and not field.unique def _field_became_primary_key(self, old_field, new_field): return not old_field.primary_key and new_field.primary_key def _unique_should_be_added(self, old_field, new_field): return (not old_field.unique and new_field.unique) or ( old_field.primary_key and not new_field.primary_key and new_field.unique ) def _rename_field_sql(self, table, old_field, new_field, new_type): return self.sql_rename_column % { "table": self.quote_name(table), "old_column": self.quote_name(old_field.column), "new_column": self.quote_name(new_field.column), "type": new_type, } def _create_fk_sql(self, model, field, suffix): table = Table(model._meta.db_table, self.quote_name) name = self._fk_constraint_name(model, field, suffix) column = Columns(model._meta.db_table, [field.column], self.quote_name) to_table = Table(field.target_field.model._meta.db_table, self.quote_name) to_column = Columns(field.target_field.model._meta.db_table, [field.target_field.column], self.quote_name) deferrable = self.connection.ops.deferrable_sql() return Statement( self.sql_create_fk, table=table, name=name, column=column, to_table=to_table, to_column=to_column, deferrable=deferrable, ) def _fk_constraint_name(self, model, field, suffix): def create_fk_name(*args, **kwargs): return self.quote_name(self._create_index_name(*args, **kwargs)) return ForeignKeyName( model._meta.db_table, [field.column], split_identifier(field.target_field.model._meta.db_table)[1], [field.target_field.column], suffix, create_fk_name, ) def _delete_fk_sql(self, model, name): return self._delete_constraint_sql(self.sql_delete_fk, model, name) def _unique_sql(self, model, 
fields, name, condition=None): if condition: # Databases support conditional unique constraints via a unique # index. sql = self._create_unique_sql(model, fields, name=name, condition=condition) if sql: self.deferred_sql.append(sql) return None constraint = self.sql_unique_constraint % { 'columns': ', '.join(map(self.quote_name, fields)), } return self.sql_constraint % { 'name': self.quote_name(name), 'constraint': constraint, } def _create_unique_sql(self, model, columns, name=None, condition=None): def create_unique_name(*args, **kwargs): return self.quote_name(self._create_index_name(*args, **kwargs)) table = Table(model._meta.db_table, self.quote_name) if name is None: name = IndexName(model._meta.db_table, columns, '_uniq', create_unique_name) else: name = self.quote_name(name) columns = Columns(table, columns, self.quote_name) if condition: return Statement( self.sql_create_unique_index, table=table, name=name, columns=columns, condition=' WHERE ' + condition, ) if self.connection.features.supports_partial_indexes else None else: return Statement( self.sql_create_unique, table=table, name=name, columns=columns, ) def _delete_unique_sql(self, model, name, condition=None): if condition: return ( self._delete_constraint_sql(self.sql_delete_index, model, name) if self.connection.features.supports_partial_indexes else None ) return self._delete_constraint_sql(self.sql_delete_unique, model, name) def _check_sql(self, name, check): return self.sql_constraint % { 'name': self.quote_name(name), 'constraint': self.sql_check_constraint % {'check': check}, } def _create_check_sql(self, model, name, check): return Statement( self.sql_create_check, table=Table(model._meta.db_table, self.quote_name), name=self.quote_name(name), check=check, ) def _delete_check_sql(self, model, name): return self._delete_constraint_sql(self.sql_delete_check, model, name) def _delete_constraint_sql(self, template, model, name): return Statement( template, table=Table(model._meta.db_table, self.quote_name), name=self.quote_name(name), ) def _constraint_names(self, model, column_names=None, unique=None, primary_key=None, index=None, foreign_key=None, check=None, type_=None, exclude=None): """Return all constraint names matching the columns and conditions.""" if column_names is not None: column_names = [ self.connection.introspection.identifier_converter(name) for name in column_names ] with self.connection.cursor() as cursor: constraints = self.connection.introspection.get_constraints(cursor, model._meta.db_table) result = [] for name, infodict in constraints.items(): if column_names is None or column_names == infodict['columns']: if unique is not None and infodict['unique'] != unique: continue if primary_key is not None and infodict['primary_key'] != primary_key: continue if index is not None and infodict['index'] != index: continue if check is not None and infodict['check'] != check: continue if foreign_key is not None and not infodict['foreign_key']: continue if type_ is not None and infodict['type'] != type_: continue if not exclude or name not in exclude: result.append(name) return result def _delete_primary_key(self, model, strict=False): constraint_names = self._constraint_names(model, primary_key=True) if strict and len(constraint_names) != 1: raise ValueError('Found wrong number (%s) of PK constraints for %s' % ( len(constraint_names), model._meta.db_table, )) for constraint_name in constraint_names: self.execute(self._delete_primary_key_sql(model, constraint_name)) def _create_primary_key_sql(self, model, 
field): return Statement( self.sql_create_pk, table=Table(model._meta.db_table, self.quote_name), name=self.quote_name( self._create_index_name(model._meta.db_table, [field.column], suffix="_pk") ), columns=Columns(model._meta.db_table, [field.column], self.quote_name), ) def _delete_primary_key_sql(self, model, name): return self._delete_constraint_sql(self.sql_delete_pk, model, name) def remove_procedure(self, procedure_name, param_types=()): sql = self.sql_delete_procedure % { 'procedure': self.quote_name(procedure_name), 'param_types': ','.join(param_types), } self.execute(sql)
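# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the module above: how a schema editor is
# typically driven. `Author` and `new_field` are hypothetical names; the
# editor is obtained from the connection and used as a context manager so
# that deferred SQL (indexes, FK constraints) is executed on __exit__.
#
#     from django.db import connection
#
#     with connection.schema_editor() as editor:
#         editor.create_model(Author)          # CREATE TABLE + deferred statements
#         editor.add_field(Author, new_field)  # ALTER TABLE ... ADD COLUMN
#
#     # Collect SQL without executing it (sqlmigrate-style output):
#     with connection.schema_editor(collect_sql=True) as editor:
#         editor.create_model(Author)
#     print(editor.collected_sql)
# ---------------------------------------------------------------------------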
import operator from django.db.backends.base.features import BaseDatabaseFeatures from django.utils.functional import cached_property class DatabaseFeatures(BaseDatabaseFeatures): empty_fetchmany_value = () update_can_self_select = False allows_group_by_pk = True related_fields_match_type = True # MySQL doesn't support sliced subqueries with IN/ALL/ANY/SOME. allow_sliced_subqueries_with_in = False has_select_for_update = True supports_forward_references = False supports_regex_backreferencing = False supports_date_lookup_using_string = False can_introspect_autofield = True can_introspect_binary_field = False can_introspect_duration_field = False can_introspect_small_integer_field = True can_introspect_positive_integer_field = True introspected_boolean_field_type = 'IntegerField' supports_index_column_ordering = False supports_timezones = False requires_explicit_null_ordering_when_grouping = True allows_auto_pk_0 = False can_release_savepoints = True atomic_transactions = False can_clone_databases = True supports_temporal_subtraction = True supports_select_intersection = False supports_select_difference = False supports_slicing_ordering_in_compound = True supports_index_on_text_field = False has_case_insensitive_like = False create_test_procedure_without_params_sql = """ CREATE PROCEDURE test_procedure () BEGIN DECLARE V_I INTEGER; SET V_I = 1; END; """ create_test_procedure_with_int_param_sql = """ CREATE PROCEDURE test_procedure (P_I INTEGER) BEGIN DECLARE V_I INTEGER; SET V_I = P_I; END; """ db_functions_convert_bytes_to_str = True # Alias MySQL's TRADITIONAL to TEXT for consistency with other backends. supported_explain_formats = {'JSON', 'TEXT', 'TRADITIONAL'} # Neither MySQL nor MariaDB support partial indexes. supports_partial_indexes = False @cached_property def _mysql_storage_engine(self): "Internal method used in Django tests. Don't rely on this from your code" with self.connection.cursor() as cursor: cursor.execute("SELECT ENGINE FROM INFORMATION_SCHEMA.ENGINES WHERE SUPPORT = 'DEFAULT'") result = cursor.fetchone() return result[0] @cached_property def can_introspect_foreign_keys(self): "Confirm support for introspected foreign keys" return self._mysql_storage_engine != 'MyISAM' @cached_property def has_zoneinfo_database(self): # Test if the time zone definitions are installed. 
with self.connection.cursor() as cursor: cursor.execute("SELECT 1 FROM mysql.time_zone LIMIT 1") return cursor.fetchone() is not None @cached_property def is_sql_auto_is_null_enabled(self): with self.connection.cursor() as cursor: cursor.execute('SELECT @@SQL_AUTO_IS_NULL') result = cursor.fetchone() return result and result[0] == 1 @cached_property def supports_over_clause(self): if self.connection.mysql_is_mariadb: return self.connection.mysql_version >= (10, 2) return self.connection.mysql_version >= (8, 0, 2) @cached_property def supports_column_check_constraints(self): return self.connection.mysql_is_mariadb and self.connection.mysql_version >= (10, 2, 1) supports_table_check_constraints = property(operator.attrgetter('supports_column_check_constraints')) @cached_property def can_introspect_check_constraints(self): if self.connection.mysql_is_mariadb: version = self.connection.mysql_version if (version >= (10, 2, 22) and version < (10, 3)) or version >= (10, 3, 10): return True return False @cached_property def has_select_for_update_skip_locked(self): return not self.connection.mysql_is_mariadb and self.connection.mysql_version >= (8, 0, 1) has_select_for_update_nowait = property(operator.attrgetter('has_select_for_update_skip_locked')) @cached_property def needs_explain_extended(self): # EXTENDED is deprecated (and not required) in MySQL 5.7. return not self.connection.mysql_is_mariadb and self.connection.mysql_version < (5, 7) @cached_property def supports_transactions(self): """ All storage engines except MyISAM support transactions. """ return self._mysql_storage_engine != 'MyISAM' @cached_property def ignores_table_name_case(self): with self.connection.cursor() as cursor: cursor.execute('SELECT @@LOWER_CASE_TABLE_NAMES') result = cursor.fetchone() return result and result[0] != 0 @cached_property def supports_default_in_lead_lag(self): # To be added in https://jira.mariadb.org/browse/MDEV-12981. return not self.connection.mysql_is_mariadb
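# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the module above: feature flags such as
# the cached properties defined here are read off the active connection, so
# other parts of the ORM can gate SQL generation per backend, e.g.:
#
#     from django.db import connection
#
#     if connection.features.supports_over_clause:
#         ...  # safe to compile window expressions on this MySQL/MariaDB
#     if not connection.features.supports_transactions:
#         ...  # e.g. the default storage engine is MyISAM
# ---------------------------------------------------------------------------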
from collections import namedtuple

import sqlparse
from MySQLdb.constants import FIELD_TYPE

from django.db.backends.base.introspection import (
    BaseDatabaseIntrospection, FieldInfo as BaseFieldInfo, TableInfo,
)
from django.db.models.indexes import Index
from django.utils.datastructures import OrderedSet

FieldInfo = namedtuple('FieldInfo', BaseFieldInfo._fields + ('extra', 'is_unsigned'))
InfoLine = namedtuple('InfoLine', 'col_name data_type max_len num_prec num_scale extra column_default is_unsigned')


class DatabaseIntrospection(BaseDatabaseIntrospection):
    data_types_reverse = {
        FIELD_TYPE.BLOB: 'TextField',
        FIELD_TYPE.CHAR: 'CharField',
        FIELD_TYPE.DECIMAL: 'DecimalField',
        FIELD_TYPE.NEWDECIMAL: 'DecimalField',
        FIELD_TYPE.DATE: 'DateField',
        FIELD_TYPE.DATETIME: 'DateTimeField',
        FIELD_TYPE.DOUBLE: 'FloatField',
        FIELD_TYPE.FLOAT: 'FloatField',
        FIELD_TYPE.INT24: 'IntegerField',
        FIELD_TYPE.LONG: 'IntegerField',
        FIELD_TYPE.LONGLONG: 'BigIntegerField',
        FIELD_TYPE.SHORT: 'SmallIntegerField',
        FIELD_TYPE.STRING: 'CharField',
        FIELD_TYPE.TIME: 'TimeField',
        FIELD_TYPE.TIMESTAMP: 'DateTimeField',
        FIELD_TYPE.TINY: 'IntegerField',
        FIELD_TYPE.TINY_BLOB: 'TextField',
        FIELD_TYPE.MEDIUM_BLOB: 'TextField',
        FIELD_TYPE.LONG_BLOB: 'TextField',
        FIELD_TYPE.VAR_STRING: 'CharField',
    }

    def get_field_type(self, data_type, description):
        field_type = super().get_field_type(data_type, description)
        if 'auto_increment' in description.extra:
            if field_type == 'IntegerField':
                return 'AutoField'
            elif field_type == 'BigIntegerField':
                return 'BigAutoField'
            elif field_type == 'SmallIntegerField':
                return 'SmallAutoField'
        if description.is_unsigned:
            if field_type == 'IntegerField':
                return 'PositiveIntegerField'
            elif field_type == 'SmallIntegerField':
                return 'PositiveSmallIntegerField'
        return field_type

    def get_table_list(self, cursor):
        """Return a list of table and view names in the current database."""
        cursor.execute("SHOW FULL TABLES")
        return [TableInfo(row[0], {'BASE TABLE': 't', 'VIEW': 'v'}.get(row[1]))
                for row in cursor.fetchall()]

    def get_table_description(self, cursor, table_name):
        """
        Return a description of the table with the DB-API cursor.description
        interface.
        """
        # information_schema database gives more accurate results for some figures:
        # - varchar length returned by cursor.description is an internal length,
        #   not visible length (#5725)
        # - precision and scale (for decimal fields) (#5014)
        # - auto_increment is not available in cursor.description
        cursor.execute("""
            SELECT column_name, data_type, character_maximum_length,
            numeric_precision, numeric_scale, extra, column_default,
            CASE WHEN column_type LIKE '%% unsigned' THEN 1 ELSE 0 END AS is_unsigned
            FROM information_schema.columns
            WHERE table_name = %s AND table_schema = DATABASE()""", [table_name])
        field_info = {line[0]: InfoLine(*line) for line in cursor.fetchall()}

        cursor.execute("SELECT * FROM %s LIMIT 1" % self.connection.ops.quote_name(table_name))

        def to_int(i):
            return int(i) if i is not None else i

        fields = []
        for line in cursor.description:
            info = field_info[line[0]]
            fields.append(FieldInfo(
                *line[:3],
                to_int(info.max_len) or line[3],
                to_int(info.num_prec) or line[4],
                to_int(info.num_scale) or line[5],
                line[6],
                info.column_default,
                info.extra,
                info.is_unsigned,
            ))
        return fields

    def get_sequences(self, cursor, table_name, table_fields=()):
        for field_info in self.get_table_description(cursor, table_name):
            if 'auto_increment' in field_info.extra:
                # MySQL allows only one auto-increment column per table.
return [{'table': table_name, 'column': field_info.name}] return [] def get_relations(self, cursor, table_name): """ Return a dictionary of {field_name: (field_name_other_table, other_table)} representing all relationships to the given table. """ constraints = self.get_key_columns(cursor, table_name) relations = {} for my_fieldname, other_table, other_field in constraints: relations[my_fieldname] = (other_field, other_table) return relations def get_key_columns(self, cursor, table_name): """ Return a list of (column_name, referenced_table_name, referenced_column_name) for all key columns in the given table. """ key_columns = [] cursor.execute(""" SELECT column_name, referenced_table_name, referenced_column_name FROM information_schema.key_column_usage WHERE table_name = %s AND table_schema = DATABASE() AND referenced_table_name IS NOT NULL AND referenced_column_name IS NOT NULL""", [table_name]) key_columns.extend(cursor.fetchall()) return key_columns def get_storage_engine(self, cursor, table_name): """ Retrieve the storage engine for a given table. Return the default storage engine if the table doesn't exist. """ cursor.execute( "SELECT engine " "FROM information_schema.tables " "WHERE table_name = %s", [table_name]) result = cursor.fetchone() if not result: return self.connection.features._mysql_storage_engine return result[0] def _parse_constraint_columns(self, check_clause, columns): check_columns = OrderedSet() statement = sqlparse.parse(check_clause)[0] tokens = (token for token in statement.flatten() if not token.is_whitespace) for token in tokens: if ( token.ttype == sqlparse.tokens.Name and self.connection.ops.quote_name(token.value) == token.value and token.value[1:-1] in columns ): check_columns.add(token.value[1:-1]) return check_columns def get_constraints(self, cursor, table_name): """ Retrieve any constraints or keys (unique, pk, fk, check, index) across one or more columns. """ constraints = {} # Get the actual constraint names and columns name_query = """ SELECT kc.`constraint_name`, kc.`column_name`, kc.`referenced_table_name`, kc.`referenced_column_name` FROM information_schema.key_column_usage AS kc WHERE kc.table_schema = DATABASE() AND kc.table_name = %s ORDER BY kc.`ordinal_position` """ cursor.execute(name_query, [table_name]) for constraint, column, ref_table, ref_column in cursor.fetchall(): if constraint not in constraints: constraints[constraint] = { 'columns': OrderedSet(), 'primary_key': False, 'unique': False, 'index': False, 'check': False, 'foreign_key': (ref_table, ref_column) if ref_column else None, } constraints[constraint]['columns'].add(column) # Now get the constraint types type_query = """ SELECT c.constraint_name, c.constraint_type FROM information_schema.table_constraints AS c WHERE c.table_schema = DATABASE() AND c.table_name = %s """ cursor.execute(type_query, [table_name]) for constraint, kind in cursor.fetchall(): if kind.lower() == "primary key": constraints[constraint]['primary_key'] = True constraints[constraint]['unique'] = True elif kind.lower() == "unique": constraints[constraint]['unique'] = True # Add check constraints. 
if self.connection.features.can_introspect_check_constraints: columns = {info.name for info in self.get_table_description(cursor, table_name)} type_query = """ SELECT c.constraint_name, c.check_clause FROM information_schema.check_constraints AS c WHERE c.constraint_schema = DATABASE() AND c.table_name = %s """ cursor.execute(type_query, [table_name]) for constraint, check_clause in cursor.fetchall(): constraints[constraint] = { 'columns': self._parse_constraint_columns(check_clause, columns), 'primary_key': False, 'unique': False, 'index': False, 'check': True, 'foreign_key': None, } # Now add in the indexes cursor.execute("SHOW INDEX FROM %s" % self.connection.ops.quote_name(table_name)) for table, non_unique, index, colseq, column, type_ in [x[:5] + (x[10],) for x in cursor.fetchall()]: if index not in constraints: constraints[index] = { 'columns': OrderedSet(), 'primary_key': False, 'unique': False, 'check': False, 'foreign_key': None, } constraints[index]['index'] = True constraints[index]['type'] = Index.suffix if type_ == 'BTREE' else type_.lower() constraints[index]['columns'].add(column) # Convert the sorted sets to lists for constraint in constraints.values(): constraint['columns'] = list(constraint['columns']) return constraints
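# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the module above: introspection is usually
# reached through the connection. 'myapp_book' is a hypothetical table name.
#
#     from django.db import connection
#
#     with connection.cursor() as cursor:
#         tables = connection.introspection.get_table_list(cursor)
#         constraints = connection.introspection.get_constraints(cursor, 'myapp_book')
#         # e.g. {'PRIMARY': {'columns': ['id'], 'primary_key': True,
#         #                   'unique': True, ...}, ...}
# ---------------------------------------------------------------------------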
""" MySQL database backend for Django. Requires mysqlclient: https://pypi.org/project/mysqlclient/ """ import re from django.core.exceptions import ImproperlyConfigured from django.db import utils from django.db.backends import utils as backend_utils from django.db.backends.base.base import BaseDatabaseWrapper from django.utils.asyncio import async_unsafe from django.utils.functional import cached_property try: import MySQLdb as Database except ImportError as err: raise ImproperlyConfigured( 'Error loading MySQLdb module.\n' 'Did you install mysqlclient?' ) from err from MySQLdb.constants import CLIENT, FIELD_TYPE # isort:skip from MySQLdb.converters import conversions # isort:skip # Some of these import MySQLdb, so import them after checking if it's installed. from .client import DatabaseClient # isort:skip from .creation import DatabaseCreation # isort:skip from .features import DatabaseFeatures # isort:skip from .introspection import DatabaseIntrospection # isort:skip from .operations import DatabaseOperations # isort:skip from .schema import DatabaseSchemaEditor # isort:skip from .validation import DatabaseValidation # isort:skip version = Database.version_info if version < (1, 3, 13): raise ImproperlyConfigured('mysqlclient 1.3.13 or newer is required; you have %s.' % Database.__version__) # MySQLdb returns TIME columns as timedelta -- they are more like timedelta in # terms of actual behavior as they are signed and include days -- and Django # expects time. django_conversions = { **conversions, **{FIELD_TYPE.TIME: backend_utils.typecast_time}, } # This should match the numerical portion of the version numbers (we can treat # versions like 5.0.24 and 5.0.24a as the same). server_version_re = re.compile(r'(\d{1,2})\.(\d{1,2})\.(\d{1,2})') class CursorWrapper: """ A thin wrapper around MySQLdb's normal cursor class that catches particular exception instances and reraises them with the correct types. Implemented as a wrapper, rather than a subclass, so that it isn't stuck to the particular underlying representation returned by Connection.cursor(). """ codes_for_integrityerror = ( 1048, # Column cannot be null 1690, # BIGINT UNSIGNED value is out of range 4025, # CHECK constraint failed ) def __init__(self, cursor): self.cursor = cursor def execute(self, query, args=None): try: # args is None means no string interpolation return self.cursor.execute(query, args) except Database.OperationalError as e: # Map some error codes to IntegrityError, since they seem to be # misclassified and Django would prefer the more logical place. if e.args[0] in self.codes_for_integrityerror: raise utils.IntegrityError(*tuple(e.args)) raise def executemany(self, query, args): try: return self.cursor.executemany(query, args) except Database.OperationalError as e: # Map some error codes to IntegrityError, since they seem to be # misclassified and Django would prefer the more logical place. if e.args[0] in self.codes_for_integrityerror: raise utils.IntegrityError(*tuple(e.args)) raise def __getattr__(self, attr): return getattr(self.cursor, attr) def __iter__(self): return iter(self.cursor) class DatabaseWrapper(BaseDatabaseWrapper): vendor = 'mysql' # This dictionary maps Field objects to their associated MySQL column # types, as strings. Column-type strings can contain format strings; they'll # be interpolated against the values of Field.__dict__ before being output. # If a column type is set to None, it won't be included in the output. 
data_types = { 'AutoField': 'integer AUTO_INCREMENT', 'BigAutoField': 'bigint AUTO_INCREMENT', 'BinaryField': 'longblob', 'BooleanField': 'bool', 'CharField': 'varchar(%(max_length)s)', 'DateField': 'date', 'DateTimeField': 'datetime(6)', 'DecimalField': 'numeric(%(max_digits)s, %(decimal_places)s)', 'DurationField': 'bigint', 'FileField': 'varchar(%(max_length)s)', 'FilePathField': 'varchar(%(max_length)s)', 'FloatField': 'double precision', 'IntegerField': 'integer', 'BigIntegerField': 'bigint', 'IPAddressField': 'char(15)', 'GenericIPAddressField': 'char(39)', 'NullBooleanField': 'bool', 'OneToOneField': 'integer', 'PositiveIntegerField': 'integer UNSIGNED', 'PositiveSmallIntegerField': 'smallint UNSIGNED', 'SlugField': 'varchar(%(max_length)s)', 'SmallAutoField': 'smallint AUTO_INCREMENT', 'SmallIntegerField': 'smallint', 'TextField': 'longtext', 'TimeField': 'time(6)', 'UUIDField': 'char(32)', } # For these columns, MySQL doesn't: # - accept default values and implicitly treats these columns as nullable # - support a database index _limited_data_types = ( 'tinyblob', 'blob', 'mediumblob', 'longblob', 'tinytext', 'text', 'mediumtext', 'longtext', 'json', ) operators = { 'exact': '= %s', 'iexact': 'LIKE %s', 'contains': 'LIKE BINARY %s', 'icontains': 'LIKE %s', 'gt': '> %s', 'gte': '>= %s', 'lt': '< %s', 'lte': '<= %s', 'startswith': 'LIKE BINARY %s', 'endswith': 'LIKE BINARY %s', 'istartswith': 'LIKE %s', 'iendswith': 'LIKE %s', } # The patterns below are used to generate SQL pattern lookup clauses when # the right-hand side of the lookup isn't a raw string (it might be an expression # or the result of a bilateral transformation). # In those cases, special characters for LIKE operators (e.g. \, *, _) should be # escaped on database side. # # Note: we use str.format() here for readability as '%' is used as a wildcard for # the LIKE operator. pattern_esc = r"REPLACE(REPLACE(REPLACE({}, '\\', '\\\\'), '%%', '\%%'), '_', '\_')" pattern_ops = { 'contains': "LIKE BINARY CONCAT('%%', {}, '%%')", 'icontains': "LIKE CONCAT('%%', {}, '%%')", 'startswith': "LIKE BINARY CONCAT({}, '%%')", 'istartswith': "LIKE CONCAT({}, '%%')", 'endswith': "LIKE BINARY CONCAT('%%', {})", 'iendswith': "LIKE CONCAT('%%', {})", } isolation_levels = { 'read uncommitted', 'read committed', 'repeatable read', 'serializable', } Database = Database SchemaEditorClass = DatabaseSchemaEditor # Classes instantiated in __init__(). client_class = DatabaseClient creation_class = DatabaseCreation features_class = DatabaseFeatures introspection_class = DatabaseIntrospection ops_class = DatabaseOperations validation_class = DatabaseValidation def get_connection_params(self): kwargs = { 'conv': django_conversions, 'charset': 'utf8', } settings_dict = self.settings_dict if settings_dict['USER']: kwargs['user'] = settings_dict['USER'] if settings_dict['NAME']: kwargs['db'] = settings_dict['NAME'] if settings_dict['PASSWORD']: kwargs['passwd'] = settings_dict['PASSWORD'] if settings_dict['HOST'].startswith('/'): kwargs['unix_socket'] = settings_dict['HOST'] elif settings_dict['HOST']: kwargs['host'] = settings_dict['HOST'] if settings_dict['PORT']: kwargs['port'] = int(settings_dict['PORT']) # We need the number of potentially affected rows after an # "UPDATE", not the number of changed rows. kwargs['client_flag'] = CLIENT.FOUND_ROWS # Validate the transaction isolation level, if specified. 
options = settings_dict['OPTIONS'].copy() isolation_level = options.pop('isolation_level', 'read committed') if isolation_level: isolation_level = isolation_level.lower() if isolation_level not in self.isolation_levels: raise ImproperlyConfigured( "Invalid transaction isolation level '%s' specified.\n" "Use one of %s, or None." % ( isolation_level, ', '.join("'%s'" % s for s in sorted(self.isolation_levels)) )) self.isolation_level = isolation_level kwargs.update(options) return kwargs @async_unsafe def get_new_connection(self, conn_params): return Database.connect(**conn_params) def init_connection_state(self): assignments = [] if self.features.is_sql_auto_is_null_enabled: # SQL_AUTO_IS_NULL controls whether an AUTO_INCREMENT column on # a recently inserted row will return when the field is tested # for NULL. Disabling this brings this aspect of MySQL in line # with SQL standards. assignments.append('SET SQL_AUTO_IS_NULL = 0') if self.isolation_level: assignments.append('SET SESSION TRANSACTION ISOLATION LEVEL %s' % self.isolation_level.upper()) if assignments: with self.cursor() as cursor: cursor.execute('; '.join(assignments)) @async_unsafe def create_cursor(self, name=None): cursor = self.connection.cursor() return CursorWrapper(cursor) def _rollback(self): try: BaseDatabaseWrapper._rollback(self) except Database.NotSupportedError: pass def _set_autocommit(self, autocommit): with self.wrap_database_errors: self.connection.autocommit(autocommit) def disable_constraint_checking(self): """ Disable foreign key checks, primarily for use in adding rows with forward references. Always return True to indicate constraint checks need to be re-enabled. """ self.cursor().execute('SET foreign_key_checks=0') return True def enable_constraint_checking(self): """ Re-enable foreign key checks after they have been disabled. """ # Override needs_rollback in case constraint_checks_disabled is # nested inside transaction.atomic. self.needs_rollback, needs_rollback = False, self.needs_rollback try: self.cursor().execute('SET foreign_key_checks=1') finally: self.needs_rollback = needs_rollback def check_constraints(self, table_names=None): """ Check each table name in `table_names` for rows with invalid foreign key references. This method is intended to be used in conjunction with `disable_constraint_checking()` and `enable_constraint_checking()`, to determine if rows with invalid references were entered while constraint checks were off. """ with self.cursor() as cursor: if table_names is None: table_names = self.introspection.table_names(cursor) for table_name in table_names: primary_key_column_name = self.introspection.get_primary_key_column(cursor, table_name) if not primary_key_column_name: continue key_columns = self.introspection.get_key_columns(cursor, table_name) for column_name, referenced_table_name, referenced_column_name in key_columns: cursor.execute( """ SELECT REFERRING.`%s`, REFERRING.`%s` FROM `%s` as REFERRING LEFT JOIN `%s` as REFERRED ON (REFERRING.`%s` = REFERRED.`%s`) WHERE REFERRING.`%s` IS NOT NULL AND REFERRED.`%s` IS NULL """ % ( primary_key_column_name, column_name, table_name, referenced_table_name, column_name, referenced_column_name, column_name, referenced_column_name, ) ) for bad_row in cursor.fetchall(): raise utils.IntegrityError( "The row in table '%s' with primary key '%s' has an invalid " "foreign key: %s.%s contains a value '%s' that does not " "have a corresponding value in %s.%s." 
% ( table_name, bad_row[0], table_name, column_name, bad_row[1], referenced_table_name, referenced_column_name, ) ) def is_usable(self): try: self.connection.ping() except Database.Error: return False else: return True @cached_property def display_name(self): return 'MariaDB' if self.mysql_is_mariadb else 'MySQL' @cached_property def data_type_check_constraints(self): if self.features.supports_column_check_constraints: return { 'PositiveIntegerField': '`%(column)s` >= 0', 'PositiveSmallIntegerField': '`%(column)s` >= 0', } return {} @cached_property def mysql_server_info(self): with self.temporary_connection() as cursor: cursor.execute('SELECT VERSION()') return cursor.fetchone()[0] @cached_property def mysql_version(self): match = server_version_re.match(self.mysql_server_info) if not match: raise Exception('Unable to determine MySQL version from version string %r' % self.mysql_server_info) return tuple(int(x) for x in match.groups()) @cached_property def mysql_is_mariadb(self): return 'mariadb' in self.mysql_server_info.lower()
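# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the module above: the constraint-checking
# hooks defined on DatabaseWrapper are meant to bracket bulk loads that
# contain forward references (loaddata uses them internally):
#
#     from django.db import connection
#
#     connection.disable_constraint_checking()    # SET foreign_key_checks=0
#     try:
#         ...  # insert rows that reference not-yet-inserted rows
#     finally:
#         connection.enable_constraint_checking()
#     connection.check_constraints()  # raises IntegrityError on dangling FKs
# ---------------------------------------------------------------------------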
import uuid from django.conf import settings from django.db.backends.base.operations import BaseDatabaseOperations from django.utils import timezone from django.utils.duration import duration_microseconds from django.utils.encoding import force_str class DatabaseOperations(BaseDatabaseOperations): compiler_module = "django.db.backends.mysql.compiler" # MySQL stores positive fields as UNSIGNED ints. integer_field_ranges = { **BaseDatabaseOperations.integer_field_ranges, 'PositiveSmallIntegerField': (0, 65535), 'PositiveIntegerField': (0, 4294967295), } cast_data_types = { 'AutoField': 'signed integer', 'BigAutoField': 'signed integer', 'SmallAutoField': 'signed integer', 'CharField': 'char(%(max_length)s)', 'DecimalField': 'decimal(%(max_digits)s, %(decimal_places)s)', 'TextField': 'char', 'IntegerField': 'signed integer', 'BigIntegerField': 'signed integer', 'SmallIntegerField': 'signed integer', 'PositiveIntegerField': 'unsigned integer', 'PositiveSmallIntegerField': 'unsigned integer', } cast_char_field_without_max_length = 'char' explain_prefix = 'EXPLAIN' def date_extract_sql(self, lookup_type, field_name): # https://dev.mysql.com/doc/mysql/en/date-and-time-functions.html if lookup_type == 'week_day': # DAYOFWEEK() returns an integer, 1-7, Sunday=1. # Note: WEEKDAY() returns 0-6, Monday=0. return "DAYOFWEEK(%s)" % field_name elif lookup_type == 'week': # Override the value of default_week_format for consistency with # other database backends. # Mode 3: Monday, 1-53, with 4 or more days this year. return "WEEK(%s, 3)" % field_name elif lookup_type == 'iso_year': # Get the year part from the YEARWEEK function, which returns a # number as year * 100 + week. return "TRUNCATE(YEARWEEK(%s, 3), -2) / 100" % field_name else: # EXTRACT returns 1-53 based on ISO-8601 for the week number. return "EXTRACT(%s FROM %s)" % (lookup_type.upper(), field_name) def date_trunc_sql(self, lookup_type, field_name): fields = { 'year': '%%Y-01-01', 'month': '%%Y-%%m-01', } # Use double percents to escape. 
if lookup_type in fields: format_str = fields[lookup_type] return "CAST(DATE_FORMAT(%s, '%s') AS DATE)" % (field_name, format_str) elif lookup_type == 'quarter': return "MAKEDATE(YEAR(%s), 1) + INTERVAL QUARTER(%s) QUARTER - INTERVAL 1 QUARTER" % ( field_name, field_name ) elif lookup_type == 'week': return "DATE_SUB(%s, INTERVAL WEEKDAY(%s) DAY)" % ( field_name, field_name ) else: return "DATE(%s)" % (field_name) def _prepare_tzname_delta(self, tzname): if '+' in tzname: return tzname[tzname.find('+'):] elif '-' in tzname: return tzname[tzname.find('-'):] return tzname def _convert_field_to_tz(self, field_name, tzname): if settings.USE_TZ and self.connection.timezone_name != tzname: field_name = "CONVERT_TZ(%s, '%s', '%s')" % ( field_name, self.connection.timezone_name, self._prepare_tzname_delta(tzname), ) return field_name def datetime_cast_date_sql(self, field_name, tzname): field_name = self._convert_field_to_tz(field_name, tzname) return "DATE(%s)" % field_name def datetime_cast_time_sql(self, field_name, tzname): field_name = self._convert_field_to_tz(field_name, tzname) return "TIME(%s)" % field_name def datetime_extract_sql(self, lookup_type, field_name, tzname): field_name = self._convert_field_to_tz(field_name, tzname) return self.date_extract_sql(lookup_type, field_name) def datetime_trunc_sql(self, lookup_type, field_name, tzname): field_name = self._convert_field_to_tz(field_name, tzname) fields = ['year', 'month', 'day', 'hour', 'minute', 'second'] format = ('%%Y-', '%%m', '-%%d', ' %%H:', '%%i', ':%%s') # Use double percents to escape. format_def = ('0000-', '01', '-01', ' 00:', '00', ':00') if lookup_type == 'quarter': return ( "CAST(DATE_FORMAT(MAKEDATE(YEAR({field_name}), 1) + " "INTERVAL QUARTER({field_name}) QUARTER - " + "INTERVAL 1 QUARTER, '%%Y-%%m-01 00:00:00') AS DATETIME)" ).format(field_name=field_name) if lookup_type == 'week': return ( "CAST(DATE_FORMAT(DATE_SUB({field_name}, " "INTERVAL WEEKDAY({field_name}) DAY), " "'%%Y-%%m-%%d 00:00:00') AS DATETIME)" ).format(field_name=field_name) try: i = fields.index(lookup_type) + 1 except ValueError: sql = field_name else: format_str = ''.join(format[:i] + format_def[i:]) sql = "CAST(DATE_FORMAT(%s, '%s') AS DATETIME)" % (field_name, format_str) return sql def time_trunc_sql(self, lookup_type, field_name): fields = { 'hour': '%%H:00:00', 'minute': '%%H:%%i:00', 'second': '%%H:%%i:%%s', } # Use double percents to escape. if lookup_type in fields: format_str = fields[lookup_type] return "CAST(DATE_FORMAT(%s, '%s') AS TIME)" % (field_name, format_str) else: return "TIME(%s)" % (field_name) def date_interval_sql(self, timedelta): return 'INTERVAL %s MICROSECOND' % duration_microseconds(timedelta) def format_for_duration_arithmetic(self, sql): return 'INTERVAL %s MICROSECOND' % sql def force_no_ordering(self): """ "ORDER BY NULL" prevents MySQL from implicitly ordering by grouped columns. If no ordering would otherwise be applied, we don't want any implicit sorting going on. """ return [(None, ("NULL", [], False))] def last_executed_query(self, cursor, sql, params): # With MySQLdb, cursor objects have an (undocumented) "_executed" # attribute where the exact query sent to the database is saved. # See MySQLdb/cursors.py in the source distribution. # MySQLdb returns string, PyMySQL bytes. 
return force_str(getattr(cursor, '_executed', None), errors='replace') def no_limit_value(self): # 2**64 - 1, as recommended by the MySQL documentation return 18446744073709551615 def quote_name(self, name): if name.startswith("`") and name.endswith("`"): return name # Quoting once is enough. return "`%s`" % name def random_function_sql(self): return 'RAND()' def sql_flush(self, style, tables, sequences, allow_cascade=False): # NB: The generated SQL below is specific to MySQL # 'TRUNCATE x;', 'TRUNCATE y;', 'TRUNCATE z;'... style SQL statements # to clear all tables of all data if tables: sql = ['SET FOREIGN_KEY_CHECKS = 0;'] for table in tables: sql.append('%s %s;' % ( style.SQL_KEYWORD('TRUNCATE'), style.SQL_FIELD(self.quote_name(table)), )) sql.append('SET FOREIGN_KEY_CHECKS = 1;') sql.extend(self.sequence_reset_by_name_sql(style, sequences)) return sql else: return [] def validate_autopk_value(self, value): # MySQLism: zero in AUTO_INCREMENT field does not work. Refs #17653. if value == 0: raise ValueError('The database backend does not accept 0 as a ' 'value for AutoField.') return value def adapt_datetimefield_value(self, value): if value is None: return None # Expression values are adapted by the database. if hasattr(value, 'resolve_expression'): return value # MySQL doesn't support tz-aware datetimes if timezone.is_aware(value): if settings.USE_TZ: value = timezone.make_naive(value, self.connection.timezone) else: raise ValueError("MySQL backend does not support timezone-aware datetimes when USE_TZ is False.") return str(value) def adapt_timefield_value(self, value): if value is None: return None # Expression values are adapted by the database. if hasattr(value, 'resolve_expression'): return value # MySQL doesn't support tz-aware times if timezone.is_aware(value): raise ValueError("MySQL backend does not support timezone-aware times.") return str(value) def max_name_length(self): return 64 def bulk_insert_sql(self, fields, placeholder_rows): placeholder_rows_sql = (", ".join(row) for row in placeholder_rows) values_sql = ", ".join("(%s)" % sql for sql in placeholder_rows_sql) return "VALUES " + values_sql def combine_expression(self, connector, sub_expressions): if connector == '^': return 'POW(%s)' % ','.join(sub_expressions) # Convert the result to a signed integer since MySQL's binary operators # return an unsigned integer. 
elif connector in ('&', '|', '<<'): return 'CONVERT(%s, SIGNED)' % connector.join(sub_expressions) elif connector == '>>': lhs, rhs = sub_expressions return 'FLOOR(%(lhs)s / POW(2, %(rhs)s))' % {'lhs': lhs, 'rhs': rhs} return super().combine_expression(connector, sub_expressions) def get_db_converters(self, expression): converters = super().get_db_converters(expression) internal_type = expression.output_field.get_internal_type() if internal_type in ['BooleanField', 'NullBooleanField']: converters.append(self.convert_booleanfield_value) elif internal_type == 'DateTimeField': if settings.USE_TZ: converters.append(self.convert_datetimefield_value) elif internal_type == 'UUIDField': converters.append(self.convert_uuidfield_value) return converters def convert_booleanfield_value(self, value, expression, connection): if value in (0, 1): value = bool(value) return value def convert_datetimefield_value(self, value, expression, connection): if value is not None: value = timezone.make_aware(value, self.connection.timezone) return value def convert_uuidfield_value(self, value, expression, connection): if value is not None: value = uuid.UUID(value) return value def binary_placeholder_sql(self, value): return '_binary %s' if value is not None and not hasattr(value, 'as_sql') else '%s' def subtract_temporals(self, internal_type, lhs, rhs): lhs_sql, lhs_params = lhs rhs_sql, rhs_params = rhs if internal_type == 'TimeField': if self.connection.mysql_is_mariadb: # MariaDB includes the microsecond component in TIME_TO_SEC as # a decimal. MySQL returns an integer without microseconds. return 'CAST((TIME_TO_SEC(%(lhs)s) - TIME_TO_SEC(%(rhs)s)) * 1000000 AS SIGNED)' % { 'lhs': lhs_sql, 'rhs': rhs_sql }, lhs_params + rhs_params return ( "((TIME_TO_SEC(%(lhs)s) * 1000000 + MICROSECOND(%(lhs)s)) -" " (TIME_TO_SEC(%(rhs)s) * 1000000 + MICROSECOND(%(rhs)s)))" ) % {'lhs': lhs_sql, 'rhs': rhs_sql}, lhs_params * 2 + rhs_params * 2 else: return "TIMESTAMPDIFF(MICROSECOND, %s, %s)" % (rhs_sql, lhs_sql), rhs_params + lhs_params def explain_query_prefix(self, format=None, **options): # Alias MySQL's TRADITIONAL to TEXT for consistency with other backends. if format and format.upper() == 'TEXT': format = 'TRADITIONAL' prefix = super().explain_query_prefix(format, **options) if format: prefix += ' FORMAT=%s' % format if self.connection.features.needs_explain_extended and format is None: # EXTENDED and FORMAT are mutually exclusive options. prefix += ' EXTENDED' return prefix def regex_lookup(self, lookup_type): # REGEXP BINARY doesn't work correctly in MySQL 8+ and REGEXP_LIKE # doesn't exist in MySQL 5.6 or in MariaDB. if self.connection.mysql_version < (8, 0, 0) or self.connection.mysql_is_mariadb: if lookup_type == 'regex': return '%s REGEXP BINARY %s' return '%s REGEXP %s' match_option = 'c' if lookup_type == 'regex' else 'i' return "REGEXP_LIKE(%%s, %%s, '%s')" % match_option def insert_statement(self, ignore_conflicts=False): return 'INSERT IGNORE INTO' if ignore_conflicts else super().insert_statement(ignore_conflicts)
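# Illustrative sketch: the format-string assembly used by datetime_trunc_sql()
# above, shown for lookup_type='minute'. Components up to the truncation point
# are kept; the rest are padded with their zero defaults. The column name is
# hypothetical; the guard keeps the demo from running on import.
if __name__ == '__main__':
    fields = ['year', 'month', 'day', 'hour', 'minute', 'second']
    fmt = ('%%Y-', '%%m', '-%%d', ' %%H:', '%%i', ':%%s')
    fmt_def = ('0000-', '01', '-01', ' 00:', '00', ':00')
    i = fields.index('minute') + 1
    format_str = ''.join(fmt[:i] + fmt_def[i:])
    # The doubled percents survive Django's own parameter interpolation and
    # reach MySQL as %Y-%m-%d %H:%i:00.
    print("CAST(DATE_FORMAT(`created`, '%s') AS DATETIME)" % format_str)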
from django.db.backends.base.schema import BaseDatabaseSchemaEditor
from django.db.models import NOT_PROVIDED


class DatabaseSchemaEditor(BaseDatabaseSchemaEditor):

    sql_rename_table = "RENAME TABLE %(old_table)s TO %(new_table)s"

    sql_alter_column_null = "MODIFY %(column)s %(type)s NULL"
    sql_alter_column_not_null = "MODIFY %(column)s %(type)s NOT NULL"
    sql_alter_column_type = "MODIFY %(column)s %(type)s"

    # No 'CASCADE' here: it works as a no-op in MySQL but is undocumented.
    sql_delete_column = "ALTER TABLE %(table)s DROP COLUMN %(column)s"

    sql_rename_column = "ALTER TABLE %(table)s CHANGE %(old_column)s %(new_column)s %(type)s"

    sql_delete_unique = "ALTER TABLE %(table)s DROP INDEX %(name)s"
    sql_create_column_inline_fk = (
        ', ADD CONSTRAINT %(name)s FOREIGN KEY (%(column)s) '
        'REFERENCES %(to_table)s(%(to_column)s)'
    )
    sql_delete_fk = "ALTER TABLE %(table)s DROP FOREIGN KEY %(name)s"

    sql_delete_index = "DROP INDEX %(name)s ON %(table)s"

    sql_create_pk = "ALTER TABLE %(table)s ADD CONSTRAINT %(name)s PRIMARY KEY (%(columns)s)"
    sql_delete_pk = "ALTER TABLE %(table)s DROP PRIMARY KEY"

    sql_create_index = 'CREATE INDEX %(name)s ON %(table)s (%(columns)s)%(extra)s'

    # On MariaDB, the name of a column check constraint is the same as the
    # field name. Adding an IF EXISTS clause prevents a crash during
    # migrations, since the constraint is removed as part of a "MODIFY"
    # column statement.
    sql_delete_check = 'ALTER TABLE %(table)s DROP CONSTRAINT IF EXISTS %(name)s'

    def quote_value(self, value):
        self.connection.ensure_connection()
        if isinstance(value, str):
            value = value.replace('%', '%%')
        # MySQLdb escapes to string, PyMySQL to bytes.
        quoted = self.connection.connection.escape(value, self.connection.connection.encoders)
        if isinstance(value, str) and isinstance(quoted, bytes):
            quoted = quoted.decode()
        return quoted

    def _is_limited_data_type(self, field):
        db_type = field.db_type(self.connection)
        return db_type is not None and db_type.lower() in self.connection._limited_data_types

    def skip_default(self, field):
        return self._is_limited_data_type(field)

    def add_field(self, model, field):
        super().add_field(model, field)
        # Simulate the effect of a one-off default.
        # field.default may be unhashable, so a set isn't used for "in" check.
        if self.skip_default(field) and field.default not in (None, NOT_PROVIDED):
            effective_default = self.effective_default(field)
            self.execute('UPDATE %(table)s SET %(column)s = %%s' % {
                'table': self.quote_name(model._meta.db_table),
                'column': self.quote_name(field.column),
            }, [effective_default])

    def _field_should_be_indexed(self, model, field):
        create_index = super()._field_should_be_indexed(model, field)
        storage = self.connection.introspection.get_storage_engine(
            self.connection.cursor(), model._meta.db_table
        )
        # No need to create an index for ForeignKey fields except if
        # db_constraint=False because the index from that constraint won't be
        # created.
        if (storage == "InnoDB" and
                create_index and
                field.get_internal_type() == 'ForeignKey' and
                field.db_constraint):
            return False
        return not self._is_limited_data_type(field) and create_index

    def _delete_composed_index(self, model, fields, *args):
        """
        MySQL can remove an implicit FK index on a field when that field is
        covered by another index like a unique_together. "covered" here means
        that the more complex index starts like the simpler one.
        http://bugs.mysql.com/bug.php?id=37910 / Django ticket #24757
        Here, before removing the [unique|index]_together, check whether the
        implicit FK index on the first field needs to be recreated.
""" first_field = model._meta.get_field(fields[0]) if first_field.get_internal_type() == 'ForeignKey': constraint_names = self._constraint_names(model, [first_field.column], index=True) if not constraint_names: self.execute(self._create_index_sql(model, [first_field], suffix="")) return super()._delete_composed_index(model, fields, *args) def _set_field_new_type_null_status(self, field, new_type): """ Keep the null property of the old field. If it has changed, it will be handled separately. """ if field.null: new_type += " NULL" else: new_type += " NOT NULL" return new_type def _alter_column_type_sql(self, model, old_field, new_field, new_type): new_type = self._set_field_new_type_null_status(old_field, new_type) return super()._alter_column_type_sql(model, old_field, new_field, new_type) def _rename_field_sql(self, table, old_field, new_field, new_type): new_type = self._set_field_new_type_null_status(old_field, new_type) return super()._rename_field_sql(table, old_field, new_field, new_type)
from psycopg2.extras import Inet from django.conf import settings from django.db import NotSupportedError from django.db.backends.base.operations import BaseDatabaseOperations class DatabaseOperations(BaseDatabaseOperations): cast_char_field_without_max_length = 'varchar' explain_prefix = 'EXPLAIN' cast_data_types = { 'AutoField': 'integer', 'BigAutoField': 'bigint', 'SmallAutoField': 'smallint', } def unification_cast_sql(self, output_field): internal_type = output_field.get_internal_type() if internal_type in ("GenericIPAddressField", "IPAddressField", "TimeField", "UUIDField"): # PostgreSQL will resolve a union as type 'text' if input types are # 'unknown'. # https://www.postgresql.org/docs/current/typeconv-union-case.html # These fields cannot be implicitly cast back in the default # PostgreSQL configuration so we need to explicitly cast them. # We must also remove components of the type within brackets: # varchar(255) -> varchar. return 'CAST(%%s AS %s)' % output_field.db_type(self.connection).split('(')[0] return '%s' def date_extract_sql(self, lookup_type, field_name): # https://www.postgresql.org/docs/current/functions-datetime.html#FUNCTIONS-DATETIME-EXTRACT if lookup_type == 'week_day': # For consistency across backends, we return Sunday=1, Saturday=7. return "EXTRACT('dow' FROM %s) + 1" % field_name elif lookup_type == 'iso_year': return "EXTRACT('isoyear' FROM %s)" % field_name else: return "EXTRACT('%s' FROM %s)" % (lookup_type, field_name) def date_trunc_sql(self, lookup_type, field_name): # https://www.postgresql.org/docs/current/functions-datetime.html#FUNCTIONS-DATETIME-TRUNC return "DATE_TRUNC('%s', %s)" % (lookup_type, field_name) def _prepare_tzname_delta(self, tzname): if '+' in tzname: return tzname.replace('+', '-') elif '-' in tzname: return tzname.replace('-', '+') return tzname def _convert_field_to_tz(self, field_name, tzname): if settings.USE_TZ: field_name = "%s AT TIME ZONE '%s'" % (field_name, self._prepare_tzname_delta(tzname)) return field_name def datetime_cast_date_sql(self, field_name, tzname): field_name = self._convert_field_to_tz(field_name, tzname) return '(%s)::date' % field_name def datetime_cast_time_sql(self, field_name, tzname): field_name = self._convert_field_to_tz(field_name, tzname) return '(%s)::time' % field_name def datetime_extract_sql(self, lookup_type, field_name, tzname): field_name = self._convert_field_to_tz(field_name, tzname) return self.date_extract_sql(lookup_type, field_name) def datetime_trunc_sql(self, lookup_type, field_name, tzname): field_name = self._convert_field_to_tz(field_name, tzname) # https://www.postgresql.org/docs/current/functions-datetime.html#FUNCTIONS-DATETIME-TRUNC return "DATE_TRUNC('%s', %s)" % (lookup_type, field_name) def time_trunc_sql(self, lookup_type, field_name): return "DATE_TRUNC('%s', %s)::time" % (lookup_type, field_name) def deferrable_sql(self): return " DEFERRABLE INITIALLY DEFERRED" def fetch_returned_insert_ids(self, cursor): """ Given a cursor object that has just performed an INSERT...RETURNING statement into a table that has an auto-incrementing ID, return the list of newly created IDs. 
""" return [item[0] for item in cursor.fetchall()] def lookup_cast(self, lookup_type, internal_type=None): lookup = '%s' # Cast text lookups to text to allow things like filter(x__contains=4) if lookup_type in ('iexact', 'contains', 'icontains', 'startswith', 'istartswith', 'endswith', 'iendswith', 'regex', 'iregex'): if internal_type in ('IPAddressField', 'GenericIPAddressField'): lookup = "HOST(%s)" elif internal_type in ('CICharField', 'CIEmailField', 'CITextField'): lookup = '%s::citext' else: lookup = "%s::text" # Use UPPER(x) for case-insensitive lookups; it's faster. if lookup_type in ('iexact', 'icontains', 'istartswith', 'iendswith'): lookup = 'UPPER(%s)' % lookup return lookup def no_limit_value(self): return None def prepare_sql_script(self, sql): return [sql] def quote_name(self, name): if name.startswith('"') and name.endswith('"'): return name # Quoting once is enough. return '"%s"' % name def set_time_zone_sql(self): return "SET TIME ZONE %s" def sql_flush(self, style, tables, sequences, allow_cascade=False): if tables: # Perform a single SQL 'TRUNCATE x, y, z...;' statement. It allows # us to truncate tables referenced by a foreign key in any other # table. tables_sql = ', '.join( style.SQL_FIELD(self.quote_name(table)) for table in tables) if allow_cascade: sql = ['%s %s %s;' % ( style.SQL_KEYWORD('TRUNCATE'), tables_sql, style.SQL_KEYWORD('CASCADE'), )] else: sql = ['%s %s;' % ( style.SQL_KEYWORD('TRUNCATE'), tables_sql, )] sql.extend(self.sequence_reset_by_name_sql(style, sequences)) return sql else: return [] def sequence_reset_by_name_sql(self, style, sequences): # 'ALTER SEQUENCE sequence_name RESTART WITH 1;'... style SQL statements # to reset sequence indices sql = [] for sequence_info in sequences: table_name = sequence_info['table'] # 'id' will be the case if it's an m2m using an autogenerated # intermediate table (see BaseDatabaseIntrospection.sequence_list). column_name = sequence_info['column'] or 'id' sql.append("%s setval(pg_get_serial_sequence('%s','%s'), 1, false);" % ( style.SQL_KEYWORD('SELECT'), style.SQL_TABLE(self.quote_name(table_name)), style.SQL_FIELD(column_name), )) return sql def tablespace_sql(self, tablespace, inline=False): if inline: return "USING INDEX TABLESPACE %s" % self.quote_name(tablespace) else: return "TABLESPACE %s" % self.quote_name(tablespace) def sequence_reset_sql(self, style, model_list): from django.db import models output = [] qn = self.quote_name for model in model_list: # Use `coalesce` to set the sequence for each model to the max pk value if there are records, # or 1 if there are none. Set the `is_called` property (the third argument to `setval`) to true # if there are records (as the max pk value is already in use), otherwise set it to false. # Use pg_get_serial_sequence to get the underlying sequence name from the table name # and column name (available since PostgreSQL 8) for f in model._meta.local_fields: if isinstance(f, models.AutoField): output.append( "%s setval(pg_get_serial_sequence('%s','%s'), " "coalesce(max(%s), 1), max(%s) %s null) %s %s;" % ( style.SQL_KEYWORD('SELECT'), style.SQL_TABLE(qn(model._meta.db_table)), style.SQL_FIELD(f.column), style.SQL_FIELD(qn(f.column)), style.SQL_FIELD(qn(f.column)), style.SQL_KEYWORD('IS NOT'), style.SQL_KEYWORD('FROM'), style.SQL_TABLE(qn(model._meta.db_table)), ) ) break # Only one AutoField is allowed per model, so don't bother continuing. 
for f in model._meta.many_to_many: if not f.remote_field.through: output.append( "%s setval(pg_get_serial_sequence('%s','%s'), " "coalesce(max(%s), 1), max(%s) %s null) %s %s;" % ( style.SQL_KEYWORD('SELECT'), style.SQL_TABLE(qn(f.m2m_db_table())), style.SQL_FIELD('id'), style.SQL_FIELD(qn('id')), style.SQL_FIELD(qn('id')), style.SQL_KEYWORD('IS NOT'), style.SQL_KEYWORD('FROM'), style.SQL_TABLE(qn(f.m2m_db_table())) ) ) return output def prep_for_iexact_query(self, x): return x def max_name_length(self): """ Return the maximum length of an identifier. The maximum length of an identifier is 63 by default, but can be changed by recompiling PostgreSQL after editing the NAMEDATALEN macro in src/include/pg_config_manual.h. This implementation returns 63, but can be overridden by a custom database backend that inherits most of its behavior from this one. """ return 63 def distinct_sql(self, fields, params): if fields: params = [param for param_list in params for param in param_list] return (['DISTINCT ON (%s)' % ', '.join(fields)], params) else: return ['DISTINCT'], [] def last_executed_query(self, cursor, sql, params): # http://initd.org/psycopg/docs/cursor.html#cursor.query # The query attribute is a Psycopg extension to the DB API 2.0. if cursor.query is not None: return cursor.query.decode() return None def return_insert_id(self, field): return "RETURNING %s", () def bulk_insert_sql(self, fields, placeholder_rows): placeholder_rows_sql = (", ".join(row) for row in placeholder_rows) values_sql = ", ".join("(%s)" % sql for sql in placeholder_rows_sql) return "VALUES " + values_sql def adapt_datefield_value(self, value): return value def adapt_datetimefield_value(self, value): return value def adapt_timefield_value(self, value): return value def adapt_ipaddressfield_value(self, value): if value: return Inet(value) return None def subtract_temporals(self, internal_type, lhs, rhs): if internal_type == 'DateField': lhs_sql, lhs_params = lhs rhs_sql, rhs_params = rhs return "(interval '1 day' * (%s - %s))" % (lhs_sql, rhs_sql), lhs_params + rhs_params return super().subtract_temporals(internal_type, lhs, rhs) def window_frame_range_start_end(self, start=None, end=None): start_, end_ = super().window_frame_range_start_end(start, end) if (start and start < 0) or (end and end > 0): raise NotSupportedError( 'PostgreSQL only supports UNBOUNDED together with PRECEDING ' 'and FOLLOWING.' ) return start_, end_ def explain_query_prefix(self, format=None, **options): prefix = super().explain_query_prefix(format) extra = {} if format: extra['FORMAT'] = format if options: extra.update({ name.upper(): 'true' if value else 'false' for name, value in options.items() }) if extra: prefix += ' (%s)' % ', '.join('%s %s' % i for i in extra.items()) return prefix def ignore_conflicts_suffix_sql(self, ignore_conflicts=None): return 'ON CONFLICT DO NOTHING' if ignore_conflicts else super().ignore_conflicts_suffix_sql(ignore_conflicts)
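# Illustrative sketch: how lookup_cast() above composes casts for a
# case-insensitive lookup on a GenericIPAddressField. HOST() renders the
# address as text (dropping any netmask) before UPPER() is applied. The
# column name is hypothetical; the guard keeps the demo from running on
# import.
if __name__ == '__main__':
    lookup = 'HOST(%s)'            # text-lookup branch for IP address fields
    lookup = 'UPPER(%s)' % lookup  # case-insensitive branch
    print(lookup % '"client_ip"')  # UPPER(HOST("client_ip"))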
import psycopg2 from django.db.backends.base.schema import BaseDatabaseSchemaEditor from django.db.backends.ddl_references import IndexColumns from django.db.backends.utils import strip_quotes class DatabaseSchemaEditor(BaseDatabaseSchemaEditor): sql_create_sequence = "CREATE SEQUENCE %(sequence)s" sql_delete_sequence = "DROP SEQUENCE IF EXISTS %(sequence)s CASCADE" sql_set_sequence_max = "SELECT setval('%(sequence)s', MAX(%(column)s)) FROM %(table)s" sql_set_sequence_owner = 'ALTER SEQUENCE %(sequence)s OWNED BY %(table)s.%(column)s' sql_create_index = "CREATE INDEX %(name)s ON %(table)s%(using)s (%(columns)s)%(extra)s%(condition)s" sql_delete_index = "DROP INDEX IF EXISTS %(name)s" sql_create_column_inline_fk = 'REFERENCES %(to_table)s(%(to_column)s)%(deferrable)s' # Setting the constraint to IMMEDIATE runs any deferred checks to allow # dropping it in the same transaction. sql_delete_fk = "SET CONSTRAINTS %(name)s IMMEDIATE; ALTER TABLE %(table)s DROP CONSTRAINT %(name)s" sql_delete_procedure = 'DROP FUNCTION %(procedure)s(%(param_types)s)' def quote_value(self, value): if isinstance(value, str): value = value.replace('%', '%%') # getquoted() returns a quoted bytestring of the adapted value. return psycopg2.extensions.adapt(value).getquoted().decode() def _field_indexes_sql(self, model, field): output = super()._field_indexes_sql(model, field) like_index_statement = self._create_like_index_sql(model, field) if like_index_statement is not None: output.append(like_index_statement) return output def _field_data_type(self, field): if field.is_relation: return field.rel_db_type(self.connection) return self.connection.data_types[field.get_internal_type()] def _create_like_index_sql(self, model, field): """ Return the statement to create an index with varchar operator pattern when the column type is 'varchar' or 'text', otherwise return None. """ db_type = field.db_type(connection=self.connection) if db_type is not None and (field.db_index or field.unique): # Fields with database column types of `varchar` and `text` need # a second index that specifies their operator class, which is # needed when performing correct LIKE queries outside the # C locale. See #12234. # # The same doesn't apply to array fields such as varchar[size] # and text[size], so skip them. if '[' in db_type: return None if db_type.startswith('varchar'): return self._create_index_sql(model, [field], suffix='_like', opclasses=['varchar_pattern_ops']) elif db_type.startswith('text'): return self._create_index_sql(model, [field], suffix='_like', opclasses=['text_pattern_ops']) return None def _alter_column_type_sql(self, model, old_field, new_field, new_type): self.sql_alter_column_type = 'ALTER COLUMN %(column)s TYPE %(type)s' # Cast when data type changed. if self._field_data_type(old_field) != self._field_data_type(new_field): self.sql_alter_column_type += ' USING %(column)s::%(type)s' # Make ALTER TYPE with SERIAL make sense. 
table = strip_quotes(model._meta.db_table) serial_fields_map = {'bigserial': 'bigint', 'serial': 'integer', 'smallserial': 'smallint'} if new_type.lower() in serial_fields_map: column = strip_quotes(new_field.column) sequence_name = "%s_%s_seq" % (table, column) return ( ( self.sql_alter_column_type % { "column": self.quote_name(column), "type": serial_fields_map[new_type.lower()], }, [], ), [ ( self.sql_delete_sequence % { "sequence": self.quote_name(sequence_name), }, [], ), ( self.sql_create_sequence % { "sequence": self.quote_name(sequence_name), }, [], ), ( self.sql_alter_column % { "table": self.quote_name(table), "changes": self.sql_alter_column_default % { "column": self.quote_name(column), "default": "nextval('%s')" % self.quote_name(sequence_name), } }, [], ), ( self.sql_set_sequence_max % { "table": self.quote_name(table), "column": self.quote_name(column), "sequence": self.quote_name(sequence_name), }, [], ), ( self.sql_set_sequence_owner % { 'table': self.quote_name(table), 'column': self.quote_name(column), 'sequence': self.quote_name(sequence_name), }, [], ), ], ) else: return super()._alter_column_type_sql(model, old_field, new_field, new_type) def _alter_field(self, model, old_field, new_field, old_type, new_type, old_db_params, new_db_params, strict=False): # Drop indexes on varchar/text/citext columns that are changing to a # different type. if (old_field.db_index or old_field.unique) and ( (old_type.startswith('varchar') and not new_type.startswith('varchar')) or (old_type.startswith('text') and not new_type.startswith('text')) or (old_type.startswith('citext') and not new_type.startswith('citext')) ): index_name = self._create_index_name(model._meta.db_table, [old_field.column], suffix='_like') self.execute(self._delete_index_sql(model, index_name)) super()._alter_field( model, old_field, new_field, old_type, new_type, old_db_params, new_db_params, strict, ) # Added an index? Create any PostgreSQL-specific indexes. if ((not (old_field.db_index or old_field.unique) and new_field.db_index) or (not old_field.unique and new_field.unique)): like_index_statement = self._create_like_index_sql(model, new_field) if like_index_statement is not None: self.execute(like_index_statement) # Removed an index? Drop any PostgreSQL-specific indexes. if old_field.unique and not (new_field.db_index or new_field.unique): index_to_remove = self._create_index_name(model._meta.db_table, [old_field.column], suffix='_like') self.execute(self._delete_index_sql(model, index_to_remove)) def _index_columns(self, table, columns, col_suffixes, opclasses): if opclasses: return IndexColumns(table, columns, self.quote_name, col_suffixes=col_suffixes, opclasses=opclasses) return super()._index_columns(table, columns, col_suffixes, opclasses)
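# Illustrative sketch: quote_value() above defers to psycopg2's adapt(); a
# literal percent sign is doubled first so the quoted result can pass through
# Django's later parameter interpolation. Assumes psycopg2 is installed, as
# this backend requires; guarded so it only runs when executed directly.
if __name__ == '__main__':
    import psycopg2.extensions

    quoted = psycopg2.extensions.adapt("O'Reilly 100%".replace('%', '%%')).getquoted()
    print(quoted.decode())  # 'O''Reilly 100%%'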
from django.db.backends.base.features import BaseDatabaseFeatures from .base import Database class DatabaseFeatures(BaseDatabaseFeatures): # SQLite can read from a cursor since SQLite 3.6.5, subject to the caveat # that statements within a connection aren't isolated from each other. See # https://sqlite.org/isolation.html. can_use_chunked_reads = True test_db_allows_multiple_connections = False supports_unspecified_pk = True supports_timezones = False max_query_params = 999 supports_mixed_date_datetime_comparisons = False can_introspect_autofield = True can_introspect_decimal_field = False can_introspect_duration_field = False can_introspect_positive_integer_field = True can_introspect_small_integer_field = True introspected_big_auto_field_type = 'AutoField' introspected_small_auto_field_type = 'AutoField' supports_transactions = True atomic_transactions = False can_rollback_ddl = True supports_atomic_references_rename = Database.sqlite_version_info >= (3, 26, 0) can_create_inline_fk = False supports_paramstyle_pyformat = False supports_sequence_reset = False can_clone_databases = True supports_temporal_subtraction = True ignores_table_name_case = True supports_cast_with_precision = False time_cast_precision = 3 can_release_savepoints = True # Is "ALTER TABLE ... RENAME COLUMN" supported? can_alter_table_rename_column = Database.sqlite_version_info >= (3, 25, 0) supports_parentheses_in_compound = False # Deferred constraint checks can be emulated on SQLite < 3.20 but not in a # reasonably performant way. supports_pragma_foreign_key_check = Database.sqlite_version_info >= (3, 20, 0) can_defer_constraint_checks = supports_pragma_foreign_key_check supports_functions_in_partial_indexes = Database.sqlite_version_info >= (3, 15, 0) supports_over_clause = Database.sqlite_version_info >= (3, 25, 0)
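# Illustrative sketch: the version gates above compare against the SQLite
# library the interpreter was built with. The same checks, standalone and
# guarded so they only run when this module is executed directly:
if __name__ == '__main__':
    print('ALTER TABLE ... RENAME COLUMN:', Database.sqlite_version_info >= (3, 25, 0))
    print('window functions (OVER):', Database.sqlite_version_info >= (3, 25, 0))
    print('PRAGMA foreign_key_check:', Database.sqlite_version_info >= (3, 20, 0))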
""" SQLite backend for the sqlite3 module in the standard library. """ import datetime import decimal import functools import hashlib import math import operator import re import statistics import warnings from itertools import chain from sqlite3 import dbapi2 as Database import pytz from django.core.exceptions import ImproperlyConfigured from django.db import utils from django.db.backends import utils as backend_utils from django.db.backends.base.base import BaseDatabaseWrapper from django.utils import timezone from django.utils.asyncio import async_unsafe from django.utils.dateparse import parse_datetime, parse_time from django.utils.duration import duration_microseconds from .client import DatabaseClient # isort:skip from .creation import DatabaseCreation # isort:skip from .features import DatabaseFeatures # isort:skip from .introspection import DatabaseIntrospection # isort:skip from .operations import DatabaseOperations # isort:skip from .schema import DatabaseSchemaEditor # isort:skip def decoder(conv_func): """ Convert bytestrings from Python's sqlite3 interface to a regular string. """ return lambda s: conv_func(s.decode()) def none_guard(func): """ Decorator that returns None if any of the arguments to the decorated function are None. Many SQL functions return NULL if any of their arguments are NULL. This decorator simplifies the implementation of this for the custom functions registered below. """ @functools.wraps(func) def wrapper(*args, **kwargs): return None if None in args else func(*args, **kwargs) return wrapper def list_aggregate(function): """ Return an aggregate class that accumulates values in a list and applies the provided function to the data. """ return type('ListAggregate', (list,), {'finalize': function, 'step': list.append}) def check_sqlite_version(): if Database.sqlite_version_info < (3, 8, 3): raise ImproperlyConfigured('SQLite 3.8.3 or later is required (found %s).' % Database.sqlite_version) check_sqlite_version() Database.register_converter("bool", b'1'.__eq__) Database.register_converter("time", decoder(parse_time)) Database.register_converter("datetime", decoder(parse_datetime)) Database.register_converter("timestamp", decoder(parse_datetime)) Database.register_converter("TIMESTAMP", decoder(parse_datetime)) Database.register_adapter(decimal.Decimal, str) class DatabaseWrapper(BaseDatabaseWrapper): vendor = 'sqlite' display_name = 'SQLite' # SQLite doesn't actually support most of these types, but it "does the right # thing" given more verbose field definitions, so leave them as is so that # schema inspection is more useful. 
data_types = { 'AutoField': 'integer', 'BigAutoField': 'integer', 'BinaryField': 'BLOB', 'BooleanField': 'bool', 'CharField': 'varchar(%(max_length)s)', 'DateField': 'date', 'DateTimeField': 'datetime', 'DecimalField': 'decimal', 'DurationField': 'bigint', 'FileField': 'varchar(%(max_length)s)', 'FilePathField': 'varchar(%(max_length)s)', 'FloatField': 'real', 'IntegerField': 'integer', 'BigIntegerField': 'bigint', 'IPAddressField': 'char(15)', 'GenericIPAddressField': 'char(39)', 'NullBooleanField': 'bool', 'OneToOneField': 'integer', 'PositiveIntegerField': 'integer unsigned', 'PositiveSmallIntegerField': 'smallint unsigned', 'SlugField': 'varchar(%(max_length)s)', 'SmallAutoField': 'integer', 'SmallIntegerField': 'smallint', 'TextField': 'text', 'TimeField': 'time', 'UUIDField': 'char(32)', } data_type_check_constraints = { 'PositiveIntegerField': '"%(column)s" >= 0', 'PositiveSmallIntegerField': '"%(column)s" >= 0', } data_types_suffix = { 'AutoField': 'AUTOINCREMENT', 'BigAutoField': 'AUTOINCREMENT', 'SmallAutoField': 'AUTOINCREMENT', } # SQLite requires LIKE statements to include an ESCAPE clause if the value # being escaped has a percent or underscore in it. # See https://www.sqlite.org/lang_expr.html for an explanation. operators = { 'exact': '= %s', 'iexact': "LIKE %s ESCAPE '\\'", 'contains': "LIKE %s ESCAPE '\\'", 'icontains': "LIKE %s ESCAPE '\\'", 'regex': 'REGEXP %s', 'iregex': "REGEXP '(?i)' || %s", 'gt': '> %s', 'gte': '>= %s', 'lt': '< %s', 'lte': '<= %s', 'startswith': "LIKE %s ESCAPE '\\'", 'endswith': "LIKE %s ESCAPE '\\'", 'istartswith': "LIKE %s ESCAPE '\\'", 'iendswith': "LIKE %s ESCAPE '\\'", } # The patterns below are used to generate SQL pattern lookup clauses when # the right-hand side of the lookup isn't a raw string (it might be an expression # or the result of a bilateral transformation). # In those cases, special characters for LIKE operators (e.g. \, *, _) should be # escaped on database side. # # Note: we use str.format() here for readability as '%' is used as a wildcard for # the LIKE operator. pattern_esc = r"REPLACE(REPLACE(REPLACE({}, '\', '\\'), '%%', '\%%'), '_', '\_')" pattern_ops = { 'contains': r"LIKE '%%' || {} || '%%' ESCAPE '\'", 'icontains': r"LIKE '%%' || UPPER({}) || '%%' ESCAPE '\'", 'startswith': r"LIKE {} || '%%' ESCAPE '\'", 'istartswith': r"LIKE UPPER({}) || '%%' ESCAPE '\'", 'endswith': r"LIKE '%%' || {} ESCAPE '\'", 'iendswith': r"LIKE '%%' || UPPER({}) ESCAPE '\'", } Database = Database SchemaEditorClass = DatabaseSchemaEditor # Classes instantiated in __init__(). client_class = DatabaseClient creation_class = DatabaseCreation features_class = DatabaseFeatures introspection_class = DatabaseIntrospection ops_class = DatabaseOperations def get_connection_params(self): settings_dict = self.settings_dict if not settings_dict['NAME']: raise ImproperlyConfigured( "settings.DATABASES is improperly configured. " "Please supply the NAME value.") kwargs = { 'database': settings_dict['NAME'], 'detect_types': Database.PARSE_DECLTYPES | Database.PARSE_COLNAMES, **settings_dict['OPTIONS'], } # Always allow the underlying SQLite connection to be shareable # between multiple threads. The safe-guarding will be handled at a # higher level by the `BaseDatabaseWrapper.allow_thread_sharing` # property. This is necessary as the shareability is disabled by # default in pysqlite and it cannot be changed once a connection is # opened. 
if 'check_same_thread' in kwargs and kwargs['check_same_thread']: warnings.warn( 'The `check_same_thread` option was provided and set to ' 'True. It will be overridden with False. Use the ' '`DatabaseWrapper.allow_thread_sharing` property instead ' 'for controlling thread shareability.', RuntimeWarning ) kwargs.update({'check_same_thread': False, 'uri': True}) return kwargs @async_unsafe def get_new_connection(self, conn_params): conn = Database.connect(**conn_params) conn.create_function("django_date_extract", 2, _sqlite_datetime_extract) conn.create_function("django_date_trunc", 2, _sqlite_date_trunc) conn.create_function('django_datetime_cast_date', 3, _sqlite_datetime_cast_date) conn.create_function('django_datetime_cast_time', 3, _sqlite_datetime_cast_time) conn.create_function('django_datetime_extract', 4, _sqlite_datetime_extract) conn.create_function('django_datetime_trunc', 4, _sqlite_datetime_trunc) conn.create_function("django_time_extract", 2, _sqlite_time_extract) conn.create_function("django_time_trunc", 2, _sqlite_time_trunc) conn.create_function("django_time_diff", 2, _sqlite_time_diff) conn.create_function("django_timestamp_diff", 2, _sqlite_timestamp_diff) conn.create_function("django_format_dtdelta", 3, _sqlite_format_dtdelta) conn.create_function('regexp', 2, _sqlite_regexp) conn.create_function('ACOS', 1, none_guard(math.acos)) conn.create_function('ASIN', 1, none_guard(math.asin)) conn.create_function('ATAN', 1, none_guard(math.atan)) conn.create_function('ATAN2', 2, none_guard(math.atan2)) conn.create_function('CEILING', 1, none_guard(math.ceil)) conn.create_function('COS', 1, none_guard(math.cos)) conn.create_function('COT', 1, none_guard(lambda x: 1 / math.tan(x))) conn.create_function('DEGREES', 1, none_guard(math.degrees)) conn.create_function('EXP', 1, none_guard(math.exp)) conn.create_function('FLOOR', 1, none_guard(math.floor)) conn.create_function('LN', 1, none_guard(math.log)) conn.create_function('LOG', 2, none_guard(lambda x, y: math.log(y, x))) conn.create_function('LPAD', 3, _sqlite_lpad) conn.create_function('MD5', 1, none_guard(lambda x: hashlib.md5(x.encode()).hexdigest())) conn.create_function('MOD', 2, none_guard(math.fmod)) conn.create_function('PI', 0, lambda: math.pi) conn.create_function('POWER', 2, none_guard(operator.pow)) conn.create_function('RADIANS', 1, none_guard(math.radians)) conn.create_function('REPEAT', 2, none_guard(operator.mul)) conn.create_function('REVERSE', 1, none_guard(lambda x: x[::-1])) conn.create_function('RPAD', 3, _sqlite_rpad) conn.create_function('SHA1', 1, none_guard(lambda x: hashlib.sha1(x.encode()).hexdigest())) conn.create_function('SHA224', 1, none_guard(lambda x: hashlib.sha224(x.encode()).hexdigest())) conn.create_function('SHA256', 1, none_guard(lambda x: hashlib.sha256(x.encode()).hexdigest())) conn.create_function('SHA384', 1, none_guard(lambda x: hashlib.sha384(x.encode()).hexdigest())) conn.create_function('SHA512', 1, none_guard(lambda x: hashlib.sha512(x.encode()).hexdigest())) conn.create_function('SIGN', 1, none_guard(lambda x: (x > 0) - (x < 0))) conn.create_function('SIN', 1, none_guard(math.sin)) conn.create_function('SQRT', 1, none_guard(math.sqrt)) conn.create_function('TAN', 1, none_guard(math.tan)) conn.create_aggregate('STDDEV_POP', 1, list_aggregate(statistics.pstdev)) conn.create_aggregate('STDDEV_SAMP', 1, list_aggregate(statistics.stdev)) conn.create_aggregate('VAR_POP', 1, list_aggregate(statistics.pvariance)) conn.create_aggregate('VAR_SAMP', 1, list_aggregate(statistics.variance)) 
        conn.execute('PRAGMA foreign_keys = ON')
        return conn

    def init_connection_state(self):
        pass

    def create_cursor(self, name=None):
        return self.connection.cursor(factory=SQLiteCursorWrapper)

    @async_unsafe
    def close(self):
        self.validate_thread_sharing()
        # If database is in memory, closing the connection destroys the
        # database. To prevent accidental data loss, ignore close requests on
        # an in-memory db.
        if not self.is_in_memory_db():
            BaseDatabaseWrapper.close(self)

    def _savepoint_allowed(self):
        # When 'isolation_level' is not None, sqlite3 commits before each
        # savepoint; it's a bug. When it is None, savepoints don't make sense
        # because autocommit is enabled. The only exception is inside 'atomic'
        # blocks. To work around that bug, on SQLite, 'atomic' starts a
        # transaction explicitly rather than simply disabling autocommit.
        return self.in_atomic_block

    def _set_autocommit(self, autocommit):
        if autocommit:
            level = None
        else:
            # sqlite3's internal default is ''. It's different from None.
            # See Modules/_sqlite/connection.c.
            level = ''
        # 'isolation_level' is a misleading API.
        # SQLite always runs at the SERIALIZABLE isolation level.
        with self.wrap_database_errors:
            self.connection.isolation_level = level

    def disable_constraint_checking(self):
        with self.cursor() as cursor:
            cursor.execute('PRAGMA foreign_keys = OFF')
            # Foreign key constraints cannot be turned off while in a multi-
            # statement transaction. Fetch the current state of the pragma
            # to determine if constraints are effectively disabled.
            enabled = cursor.execute('PRAGMA foreign_keys').fetchone()[0]
        return not bool(enabled)

    def enable_constraint_checking(self):
        with self.cursor() as cursor:
            cursor.execute('PRAGMA foreign_keys = ON')

    def check_constraints(self, table_names=None):
        """
        Check each table name in `table_names` for rows with invalid foreign
        key references. This method is intended to be used in conjunction with
        `disable_constraint_checking()` and `enable_constraint_checking()`, to
        determine if rows with invalid references were entered while
        constraint checks were off.
        """
        if self.features.supports_pragma_foreign_key_check:
            with self.cursor() as cursor:
                if table_names is None:
                    violations = cursor.execute('PRAGMA foreign_key_check').fetchall()
                else:
                    violations = chain.from_iterable(
                        cursor.execute('PRAGMA foreign_key_check(%s)' % table_name).fetchall()
                        for table_name in table_names
                    )
                # See https://www.sqlite.org/pragma.html#pragma_foreign_key_check
                for table_name, rowid, referenced_table_name, foreign_key_index in violations:
                    foreign_key = cursor.execute(
                        'PRAGMA foreign_key_list(%s)' % table_name
                    ).fetchall()[foreign_key_index]
                    column_name, referenced_column_name = foreign_key[3:5]
                    primary_key_column_name = self.introspection.get_primary_key_column(cursor, table_name)
                    primary_key_value, bad_value = cursor.execute(
                        'SELECT %s, %s FROM %s WHERE rowid = %%s' % (
                            primary_key_column_name, column_name, table_name
                        ),
                        (rowid,),
                    ).fetchone()
                    raise utils.IntegrityError(
                        "The row in table '%s' with primary key '%s' has an "
                        "invalid foreign key: %s.%s contains a value '%s' that "
                        "does not have a corresponding value in %s.%s."
% ( table_name, primary_key_value, table_name, column_name, bad_value, referenced_table_name, referenced_column_name ) ) else: with self.cursor() as cursor: if table_names is None: table_names = self.introspection.table_names(cursor) for table_name in table_names: primary_key_column_name = self.introspection.get_primary_key_column(cursor, table_name) if not primary_key_column_name: continue key_columns = self.introspection.get_key_columns(cursor, table_name) for column_name, referenced_table_name, referenced_column_name in key_columns: cursor.execute( """ SELECT REFERRING.`%s`, REFERRING.`%s` FROM `%s` as REFERRING LEFT JOIN `%s` as REFERRED ON (REFERRING.`%s` = REFERRED.`%s`) WHERE REFERRING.`%s` IS NOT NULL AND REFERRED.`%s` IS NULL """ % ( primary_key_column_name, column_name, table_name, referenced_table_name, column_name, referenced_column_name, column_name, referenced_column_name, ) ) for bad_row in cursor.fetchall(): raise utils.IntegrityError( "The row in table '%s' with primary key '%s' has an " "invalid foreign key: %s.%s contains a value '%s' that " "does not have a corresponding value in %s.%s." % ( table_name, bad_row[0], table_name, column_name, bad_row[1], referenced_table_name, referenced_column_name, ) ) def is_usable(self): return True def _start_transaction_under_autocommit(self): """ Start a transaction explicitly in autocommit mode. Staying in autocommit mode works around a bug of sqlite3 that breaks savepoints when autocommit is disabled. """ self.cursor().execute("BEGIN") def is_in_memory_db(self): return self.creation.is_in_memory_db(self.settings_dict['NAME']) FORMAT_QMARK_REGEX = re.compile(r'(?<!%)%s') class SQLiteCursorWrapper(Database.Cursor): """ Django uses "format" style placeholders, but pysqlite2 uses "qmark" style. This fixes it -- but note that if you want to use a literal "%s" in a query, you'll need to use "%%s". 
""" def execute(self, query, params=None): if params is None: return Database.Cursor.execute(self, query) query = self.convert_query(query) return Database.Cursor.execute(self, query, params) def executemany(self, query, param_list): query = self.convert_query(query) return Database.Cursor.executemany(self, query, param_list) def convert_query(self, query): return FORMAT_QMARK_REGEX.sub('?', query).replace('%%', '%') def _sqlite_datetime_parse(dt, tzname=None, conn_tzname=None): if dt is None: return None try: dt = backend_utils.typecast_timestamp(dt) except (TypeError, ValueError): return None if conn_tzname: dt = dt.replace(tzinfo=pytz.timezone(conn_tzname)) if tzname is not None and tzname != conn_tzname: sign_index = tzname.find('+') + tzname.find('-') + 1 if sign_index > -1: sign = tzname[sign_index] tzname, offset = tzname.split(sign) if offset: hours, minutes = offset.split(':') offset_delta = datetime.timedelta(hours=int(hours), minutes=int(minutes)) dt += offset_delta if sign == '+' else -offset_delta dt = timezone.localtime(dt, pytz.timezone(tzname)) return dt def _sqlite_date_trunc(lookup_type, dt): dt = _sqlite_datetime_parse(dt) if dt is None: return None if lookup_type == 'year': return "%i-01-01" % dt.year elif lookup_type == 'quarter': month_in_quarter = dt.month - (dt.month - 1) % 3 return '%i-%02i-01' % (dt.year, month_in_quarter) elif lookup_type == 'month': return "%i-%02i-01" % (dt.year, dt.month) elif lookup_type == 'week': dt = dt - datetime.timedelta(days=dt.weekday()) return "%i-%02i-%02i" % (dt.year, dt.month, dt.day) elif lookup_type == 'day': return "%i-%02i-%02i" % (dt.year, dt.month, dt.day) def _sqlite_time_trunc(lookup_type, dt): if dt is None: return None try: dt = backend_utils.typecast_time(dt) except (ValueError, TypeError): return None if lookup_type == 'hour': return "%02i:00:00" % dt.hour elif lookup_type == 'minute': return "%02i:%02i:00" % (dt.hour, dt.minute) elif lookup_type == 'second': return "%02i:%02i:%02i" % (dt.hour, dt.minute, dt.second) def _sqlite_datetime_cast_date(dt, tzname, conn_tzname): dt = _sqlite_datetime_parse(dt, tzname, conn_tzname) if dt is None: return None return dt.date().isoformat() def _sqlite_datetime_cast_time(dt, tzname, conn_tzname): dt = _sqlite_datetime_parse(dt, tzname, conn_tzname) if dt is None: return None return dt.time().isoformat() def _sqlite_datetime_extract(lookup_type, dt, tzname=None, conn_tzname=None): dt = _sqlite_datetime_parse(dt, tzname, conn_tzname) if dt is None: return None if lookup_type == 'week_day': return (dt.isoweekday() % 7) + 1 elif lookup_type == 'week': return dt.isocalendar()[1] elif lookup_type == 'quarter': return math.ceil(dt.month / 3) elif lookup_type == 'iso_year': return dt.isocalendar()[0] else: return getattr(dt, lookup_type) def _sqlite_datetime_trunc(lookup_type, dt, tzname, conn_tzname): dt = _sqlite_datetime_parse(dt, tzname, conn_tzname) if dt is None: return None if lookup_type == 'year': return "%i-01-01 00:00:00" % dt.year elif lookup_type == 'quarter': month_in_quarter = dt.month - (dt.month - 1) % 3 return '%i-%02i-01 00:00:00' % (dt.year, month_in_quarter) elif lookup_type == 'month': return "%i-%02i-01 00:00:00" % (dt.year, dt.month) elif lookup_type == 'week': dt = dt - datetime.timedelta(days=dt.weekday()) return "%i-%02i-%02i 00:00:00" % (dt.year, dt.month, dt.day) elif lookup_type == 'day': return "%i-%02i-%02i 00:00:00" % (dt.year, dt.month, dt.day) elif lookup_type == 'hour': return "%i-%02i-%02i %02i:00:00" % (dt.year, dt.month, dt.day, dt.hour) elif 
lookup_type == 'minute': return "%i-%02i-%02i %02i:%02i:00" % (dt.year, dt.month, dt.day, dt.hour, dt.minute) elif lookup_type == 'second': return "%i-%02i-%02i %02i:%02i:%02i" % (dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second) def _sqlite_time_extract(lookup_type, dt): if dt is None: return None try: dt = backend_utils.typecast_time(dt) except (ValueError, TypeError): return None return getattr(dt, lookup_type) @none_guard def _sqlite_format_dtdelta(conn, lhs, rhs): """ LHS and RHS can be either: - An integer number of microseconds - A string representing a datetime """ try: real_lhs = datetime.timedelta(0, 0, lhs) if isinstance(lhs, int) else backend_utils.typecast_timestamp(lhs) real_rhs = datetime.timedelta(0, 0, rhs) if isinstance(rhs, int) else backend_utils.typecast_timestamp(rhs) if conn.strip() == '+': out = real_lhs + real_rhs else: out = real_lhs - real_rhs except (ValueError, TypeError): return None # typecast_timestamp returns a date or a datetime without timezone. # It will be formatted as "%Y-%m-%d" or "%Y-%m-%d %H:%M:%S[.%f]" return str(out) @none_guard def _sqlite_time_diff(lhs, rhs): left = backend_utils.typecast_time(lhs) right = backend_utils.typecast_time(rhs) return ( (left.hour * 60 * 60 * 1000000) + (left.minute * 60 * 1000000) + (left.second * 1000000) + (left.microsecond) - (right.hour * 60 * 60 * 1000000) - (right.minute * 60 * 1000000) - (right.second * 1000000) - (right.microsecond) ) @none_guard def _sqlite_timestamp_diff(lhs, rhs): left = backend_utils.typecast_timestamp(lhs) right = backend_utils.typecast_timestamp(rhs) return duration_microseconds(left - right) @none_guard def _sqlite_regexp(re_pattern, re_string): return bool(re.search(re_pattern, str(re_string))) @none_guard def _sqlite_lpad(text, length, fill_text): if len(text) >= length: return text[:length] return (fill_text * length)[:length - len(text)] + text @none_guard def _sqlite_rpad(text, length, fill_text): return (text + fill_text * length)[:length]
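# Illustrative sketch: registering one of the helpers above on a raw
# in-memory connection, the same way get_new_connection() wires them up.
# Guarded so it only runs when this module is executed directly.
if __name__ == '__main__':
    demo_conn = Database.connect(':memory:')
    demo_conn.create_function('LPAD', 3, _sqlite_lpad)
    print(demo_conn.execute("SELECT LPAD('7', 3, '0')").fetchone()[0])  # 007
    demo_conn.close()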
import fnmatch import os from pathlib import Path from subprocess import PIPE, Popen from django.apps import apps as installed_apps from django.utils.crypto import get_random_string from django.utils.encoding import DEFAULT_LOCALE_ENCODING from .base import CommandError, CommandParser def popen_wrapper(args, stdout_encoding='utf-8'): """ Friendly wrapper around Popen. Return stdout output, stderr output, and OS status code. """ try: p = Popen(args, shell=False, stdout=PIPE, stderr=PIPE, close_fds=os.name != 'nt') except OSError as err: raise CommandError('Error executing %s' % args[0]) from err output, errors = p.communicate() return ( output.decode(stdout_encoding), errors.decode(DEFAULT_LOCALE_ENCODING, errors='replace'), p.returncode ) def handle_extensions(extensions): """ Organize multiple extensions that are separated with commas or passed by using --extension/-e multiple times. For example: running 'django-admin makemessages -e js,txt -e xhtml -a' would result in an extension list: ['.js', '.txt', '.xhtml'] >>> handle_extensions(['.html', 'html,js,py,py,py,.py', 'py,.py']) {'.html', '.js', '.py'} >>> handle_extensions(['.html, txt,.tpl']) {'.html', '.tpl', '.txt'} """ ext_list = [] for ext in extensions: ext_list.extend(ext.replace(' ', '').split(',')) for i, ext in enumerate(ext_list): if not ext.startswith('.'): ext_list[i] = '.%s' % ext_list[i] return set(ext_list) def find_command(cmd, path=None, pathext=None): if path is None: path = os.environ.get('PATH', '').split(os.pathsep) if isinstance(path, str): path = [path] # check if there are funny path extensions for executables, e.g. Windows if pathext is None: pathext = os.environ.get('PATHEXT', '.COM;.EXE;.BAT;.CMD').split(os.pathsep) # don't use extensions if the command ends with one of them for ext in pathext: if cmd.endswith(ext): pathext = [''] break # check if we find the command on PATH for p in path: f = os.path.join(p, cmd) if os.path.isfile(f): return f for ext in pathext: fext = f + ext if os.path.isfile(fext): return fext return None def get_random_secret_key(): """ Return a 50 character random string usable as a SECRET_KEY setting value. """ chars = 'abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)' return get_random_string(50, chars) def parse_apps_and_model_labels(labels): """ Parse a list of "app_label.ModelName" or "app_label" strings into actual objects and return a two-element tuple: (set of model classes, set of app_configs). Raise a CommandError if some specified models or apps don't exist. """ apps = set() models = set() for label in labels: if '.' in label: try: model = installed_apps.get_model(label) except LookupError: raise CommandError('Unknown model: %s' % label) models.add(model) else: try: app_config = installed_apps.get_app_config(label) except LookupError as e: raise CommandError(str(e)) apps.add(app_config) return models, apps def get_command_line_option(argv, option): """ Return the value of a command line option (which should include leading dashes, e.g. '--testrunner') from an argument list. Return None if the option wasn't passed or if the argument list couldn't be parsed. 
""" parser = CommandParser(add_help=False, allow_abbrev=False) parser.add_argument(option, dest='value') try: options, _ = parser.parse_known_args(argv[2:]) except CommandError: return None else: return options.value def normalize_path_patterns(patterns): """Normalize an iterable of glob style patterns based on OS.""" patterns = [os.path.normcase(p) for p in patterns] dir_suffixes = {'%s*' % path_sep for path_sep in {'/', os.sep}} norm_patterns = [] for pattern in patterns: for dir_suffix in dir_suffixes: if pattern.endswith(dir_suffix): norm_patterns.append(pattern[:-len(dir_suffix)]) break else: norm_patterns.append(pattern) return norm_patterns def is_ignored_path(path, ignore_patterns): """ Check if the given path should be ignored or not based on matching one of the glob style `ignore_patterns`. """ path = Path(path) def ignore(pattern): return fnmatch.fnmatchcase(path.name, pattern) or fnmatch.fnmatchcase(str(path), pattern) return any(ignore(pattern) for pattern in normalize_path_patterns(ignore_patterns))
80f2494bb09e9a0f658d5ffb3f6164d09780737628a94822be5202cd78a0670d
import asyncio import logging import sys import tempfile import traceback from asgiref.sync import sync_to_async from django.conf import settings from django.core import signals from django.core.exceptions import RequestAborted, RequestDataTooBig from django.core.handlers import base from django.http import ( FileResponse, HttpRequest, HttpResponse, HttpResponseBadRequest, HttpResponseServerError, QueryDict, parse_cookie, ) from django.urls import set_script_prefix from django.utils.functional import cached_property logger = logging.getLogger('django.request') class ASGIRequest(HttpRequest): """ Custom request subclass that decodes from an ASGI-standard request dict and wraps request body handling. """ # Number of seconds until a Request gives up on trying to read a request # body and aborts. body_receive_timeout = 60 def __init__(self, scope, body_file): self.scope = scope self._post_parse_error = False self._read_started = False self.resolver_match = None self.script_name = self.scope.get('root_path', '') if self.script_name and scope['path'].startswith(self.script_name): # TODO: Better is-prefix checking, slash handling? self.path_info = scope['path'][len(self.script_name):] else: self.path_info = scope['path'] # The Django path is different from ASGI scope path args, it should # combine with script name. if self.script_name: self.path = '%s/%s' % ( self.script_name.rstrip('/'), self.path_info.replace('/', '', 1), ) else: self.path = scope['path'] # HTTP basics. self.method = self.scope['method'].upper() # Ensure query string is encoded correctly. query_string = self.scope.get('query_string', '') if isinstance(query_string, bytes): query_string = query_string.decode() self.META = { 'REQUEST_METHOD': self.method, 'QUERY_STRING': query_string, 'SCRIPT_NAME': self.script_name, 'PATH_INFO': self.path_info, # WSGI-expecting code will need these for a while 'wsgi.multithread': True, 'wsgi.multiprocess': True, } if self.scope.get('client'): self.META['REMOTE_ADDR'] = self.scope['client'][0] self.META['REMOTE_HOST'] = self.META['REMOTE_ADDR'] self.META['REMOTE_PORT'] = self.scope['client'][1] if self.scope.get('server'): self.META['SERVER_NAME'] = self.scope['server'][0] self.META['SERVER_PORT'] = str(self.scope['server'][1]) else: self.META['SERVER_NAME'] = 'unknown' self.META['SERVER_PORT'] = '0' # Headers go into META. for name, value in self.scope.get('headers', []): name = name.decode('latin1') if name == 'content-length': corrected_name = 'CONTENT_LENGTH' elif name == 'content-type': corrected_name = 'CONTENT_TYPE' else: corrected_name = 'HTTP_%s' % name.upper().replace('-', '_') # HTTP/2 say only ASCII chars are allowed in headers, but decode # latin1 just in case. value = value.decode('latin1') if corrected_name in self.META: value = self.META[corrected_name] + ',' + value self.META[corrected_name] = value # Pull out request encoding, if provided. self._set_content_type_params(self.META) # Directly assign the body file to be our stream. self._stream = body_file # Other bits. 
        self.resolver_match = None

    @cached_property
    def GET(self):
        return QueryDict(self.META['QUERY_STRING'])

    def _get_scheme(self):
        return self.scope.get('scheme') or super()._get_scheme()

    def _get_post(self):
        if not hasattr(self, '_post'):
            self._load_post_and_files()
        return self._post

    def _set_post(self, post):
        self._post = post

    def _get_files(self):
        if not hasattr(self, '_files'):
            self._load_post_and_files()
        return self._files

    POST = property(_get_post, _set_post)
    FILES = property(_get_files)

    @cached_property
    def COOKIES(self):
        return parse_cookie(self.META.get('HTTP_COOKIE', ''))


class ASGIHandler(base.BaseHandler):
    """Handler for ASGI requests."""
    request_class = ASGIRequest
    # Size to chunk response bodies into for multiple response messages.
    chunk_size = 2 ** 16

    def __init__(self):
        super().__init__()
        self.load_middleware()

    async def __call__(self, scope, receive, send):
        """
        Async entrypoint - parses the request and hands off to get_response.
        """
        # Serve only HTTP connections.
        # FIXME: Allow this to be overridden.
        if scope['type'] != 'http':
            raise ValueError(
                'Django can only handle ASGI/HTTP connections, not %s.'
                % scope['type']
            )
        # Receive the HTTP request body as a stream object.
        try:
            body_file = await self.read_body(receive)
        except RequestAborted:
            return
        # Request is complete and can be served.
        set_script_prefix(self.get_script_prefix(scope))
        await sync_to_async(signals.request_started.send)(sender=self.__class__, scope=scope)
        # Get the request and check for basic issues.
        request, error_response = self.create_request(scope, body_file)
        if request is None:
            await self.send_response(error_response, send)
            return
        # Get the response, using a threadpool via sync_to_async, if needed.
        if asyncio.iscoroutinefunction(self.get_response):
            response = await self.get_response(request)
        else:
            # If get_response is synchronous, run it non-blocking.
            response = await sync_to_async(self.get_response)(request)
        response._handler_class = self.__class__
        # Increase chunk size on file responses (ASGI servers handle
        # low-level chunking).
        if isinstance(response, FileResponse):
            response.block_size = self.chunk_size
        # Send the response.
        await self.send_response(response, send)

    async def read_body(self, receive):
        """Read an HTTP body from an ASGI connection."""
        # Use a tempfile that automatically rolls over to a disk file as it
        # fills up.
        body_file = tempfile.SpooledTemporaryFile(max_size=settings.FILE_UPLOAD_MAX_MEMORY_SIZE, mode='w+b')
        while True:
            message = await receive()
            if message['type'] == 'http.disconnect':
                # Early client disconnect.
                raise RequestAborted()
            # Add a body chunk from the message, if provided.
            if 'body' in message:
                body_file.write(message['body'])
            # Quit out if that's the end.
            if not message.get('more_body', False):
                break
        body_file.seek(0)
        return body_file

    def create_request(self, scope, body_file):
        """
        Create the Request object and return either (request, None) or
        (None, response) if there is an error response.
        """
        try:
            return self.request_class(scope, body_file), None
        except UnicodeDecodeError:
            logger.warning(
                'Bad Request (UnicodeDecodeError)',
                exc_info=sys.exc_info(),
                extra={'status_code': 400},
            )
            return None, HttpResponseBadRequest()
        except RequestDataTooBig:
            return None, HttpResponse('413 Payload too large', status=413)

    def handle_uncaught_exception(self, request, resolver, exc_info):
        """Last-chance handler for exceptions."""
        # There's no WSGI server to catch the exception further up
        # if this fails, so translate it into a plain text response.
try: return super().handle_uncaught_exception(request, resolver, exc_info) except Exception: return HttpResponseServerError( traceback.format_exc() if settings.DEBUG else 'Internal Server Error', content_type='text/plain', ) async def send_response(self, response, send): """Encode and send a response out over ASGI.""" # Collect cookies into headers. Have to preserve header case as there # are some non-RFC compliant clients that require e.g. Content-Type. response_headers = [] for header, value in response.items(): if isinstance(header, str): header = header.encode('ascii') if isinstance(value, str): value = value.encode('latin1') response_headers.append((bytes(header), bytes(value))) for c in response.cookies.values(): response_headers.append( (b'Set-Cookie', c.output(header='').encode('ascii').strip()) ) # Initial response message. await send({ 'type': 'http.response.start', 'status': response.status_code, 'headers': response_headers, }) # Streaming responses need to be pinned to their iterator. if response.streaming: # Access `__iter__` and not `streaming_content` directly in case # it has been overridden in a subclass. for part in response: for chunk, _ in self.chunk_bytes(part): await send({ 'type': 'http.response.body', 'body': chunk, # Ignore "more" as there may be more parts; instead, # use an empty final closing message with False. 'more_body': True, }) # Final closing message. await send({'type': 'http.response.body'}) # Other responses just need chunking. else: # Yield chunks of response. for chunk, last in self.chunk_bytes(response.content): await send({ 'type': 'http.response.body', 'body': chunk, 'more_body': not last, }) response.close() @classmethod def chunk_bytes(cls, data): """ Chunks some data up so it can be sent in reasonable size messages. Yields (chunk, last_chunk) tuples. """ position = 0 if not data: yield data, True return while position < len(data): yield ( data[position:position + cls.chunk_size], (position + cls.chunk_size) >= len(data), ) position += cls.chunk_size def get_script_prefix(self, scope): """ Return the script prefix to use from either the scope or a setting. """ if settings.FORCE_SCRIPT_NAME: return settings.FORCE_SCRIPT_NAME return scope.get('root_path', '') or ''
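
# Minimal sketch of chunk_bytes() in isolation (the subclass and the tiny
# chunk_size are hypothetical, shrunk from 2 ** 16 so the splitting is
# visible; chunk_bytes() is a classmethod, so no handler is instantiated):
#
#   >>> class TinyHandler(ASGIHandler):
#   ...     chunk_size = 4
#   >>> list(TinyHandler.chunk_bytes(b'abcdefghij'))
#   [(b'abcd', False), (b'efgh', False), (b'ij', True)]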
866535f3daa9794b0389ed33bed54436be85a0fa018b9c63e167d5d00f456a87
from django.conf import settings from .. import Tags, Warning, register SECRET_KEY_MIN_LENGTH = 50 SECRET_KEY_MIN_UNIQUE_CHARACTERS = 5 W001 = Warning( "You do not have 'django.middleware.security.SecurityMiddleware' " "in your MIDDLEWARE so the SECURE_HSTS_SECONDS, " "SECURE_CONTENT_TYPE_NOSNIFF, " "SECURE_BROWSER_XSS_FILTER, and SECURE_SSL_REDIRECT settings " "will have no effect.", id='security.W001', ) W002 = Warning( "You do not have " "'django.middleware.clickjacking.XFrameOptionsMiddleware' in your " "MIDDLEWARE, so your pages will not be served with an " "'x-frame-options' header. Unless there is a good reason for your " "site to be served in a frame, you should consider enabling this " "header to help prevent clickjacking attacks.", id='security.W002', ) W004 = Warning( "You have not set a value for the SECURE_HSTS_SECONDS setting. " "If your entire site is served only over SSL, you may want to consider " "setting a value and enabling HTTP Strict Transport Security. " "Be sure to read the documentation first; enabling HSTS carelessly " "can cause serious, irreversible problems.", id='security.W004', ) W005 = Warning( "You have not set the SECURE_HSTS_INCLUDE_SUBDOMAINS setting to True. " "Without this, your site is potentially vulnerable to attack " "via an insecure connection to a subdomain. Only set this to True if " "you are certain that all subdomains of your domain should be served " "exclusively via SSL.", id='security.W005', ) W006 = Warning( "Your SECURE_CONTENT_TYPE_NOSNIFF setting is not set to True, " "so your pages will not be served with an " "'X-Content-Type-Options: nosniff' header. " "You should consider enabling this header to prevent the " "browser from identifying content types incorrectly.", id='security.W006', ) W008 = Warning( "Your SECURE_SSL_REDIRECT setting is not set to True. " "Unless your site should be available over both SSL and non-SSL " "connections, you may want to either set this setting True " "or configure a load balancer or reverse-proxy server " "to redirect all connections to HTTPS.", id='security.W008', ) W009 = Warning( "Your SECRET_KEY has less than %(min_length)s characters or less than " "%(min_unique_chars)s unique characters. Please generate a long and random " "SECRET_KEY, otherwise many of Django's security-critical features will be " "vulnerable to attack." % { 'min_length': SECRET_KEY_MIN_LENGTH, 'min_unique_chars': SECRET_KEY_MIN_UNIQUE_CHARACTERS, }, id='security.W009', ) W018 = Warning( "You should not have DEBUG set to True in deployment.", id='security.W018', ) W019 = Warning( "You have " "'django.middleware.clickjacking.XFrameOptionsMiddleware' in your " "MIDDLEWARE, but X_FRAME_OPTIONS is not set to 'DENY'. " "The default is 'SAMEORIGIN', but unless there is a good reason for " "your site to serve other parts of itself in a frame, you should " "change it to 'DENY'.", id='security.W019', ) W020 = Warning( "ALLOWED_HOSTS must not be empty in deployment.", id='security.W020', ) W021 = Warning( "You have not set the SECURE_HSTS_PRELOAD setting to True. 
Without this, " "your site cannot be submitted to the browser preload list.", id='security.W021', ) def _security_middleware(): return 'django.middleware.security.SecurityMiddleware' in settings.MIDDLEWARE def _xframe_middleware(): return 'django.middleware.clickjacking.XFrameOptionsMiddleware' in settings.MIDDLEWARE @register(Tags.security, deploy=True) def check_security_middleware(app_configs, **kwargs): passed_check = _security_middleware() return [] if passed_check else [W001] @register(Tags.security, deploy=True) def check_xframe_options_middleware(app_configs, **kwargs): passed_check = _xframe_middleware() return [] if passed_check else [W002] @register(Tags.security, deploy=True) def check_sts(app_configs, **kwargs): passed_check = not _security_middleware() or settings.SECURE_HSTS_SECONDS return [] if passed_check else [W004] @register(Tags.security, deploy=True) def check_sts_include_subdomains(app_configs, **kwargs): passed_check = ( not _security_middleware() or not settings.SECURE_HSTS_SECONDS or settings.SECURE_HSTS_INCLUDE_SUBDOMAINS is True ) return [] if passed_check else [W005] @register(Tags.security, deploy=True) def check_sts_preload(app_configs, **kwargs): passed_check = ( not _security_middleware() or not settings.SECURE_HSTS_SECONDS or settings.SECURE_HSTS_PRELOAD is True ) return [] if passed_check else [W021] @register(Tags.security, deploy=True) def check_content_type_nosniff(app_configs, **kwargs): passed_check = ( not _security_middleware() or settings.SECURE_CONTENT_TYPE_NOSNIFF is True ) return [] if passed_check else [W006] @register(Tags.security, deploy=True) def check_ssl_redirect(app_configs, **kwargs): passed_check = ( not _security_middleware() or settings.SECURE_SSL_REDIRECT is True ) return [] if passed_check else [W008] @register(Tags.security, deploy=True) def check_secret_key(app_configs, **kwargs): passed_check = ( getattr(settings, 'SECRET_KEY', None) and len(set(settings.SECRET_KEY)) >= SECRET_KEY_MIN_UNIQUE_CHARACTERS and len(settings.SECRET_KEY) >= SECRET_KEY_MIN_LENGTH ) return [] if passed_check else [W009] @register(Tags.security, deploy=True) def check_debug(app_configs, **kwargs): passed_check = not settings.DEBUG return [] if passed_check else [W018] @register(Tags.security, deploy=True) def check_xframe_deny(app_configs, **kwargs): passed_check = ( not _xframe_middleware() or settings.X_FRAME_OPTIONS == 'DENY' ) return [] if passed_check else [W019] @register(Tags.security, deploy=True) def check_allowed_hosts(app_configs, **kwargs): return [] if settings.ALLOWED_HOSTS else [W020]
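
# These checks run under `manage.py check --deploy`. A toy illustration of
# the W009 thresholds in plain Python (not a Django API call):
#
#   >>> key = 'abcde' * 10  # 50 characters, exactly 5 unique -- the minimum that passes
#   >>> len(key) >= SECRET_KEY_MIN_LENGTH
#   True
#   >>> len(set(key)) >= SECRET_KEY_MIN_UNIQUE_CHARACTERS
#   True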
b746fa6a366e55cdb653ff03eaf0500dd190d302083ce2f0e52cf44b8fc26475
from django.db.backends.ddl_references import Statement, Table
from django.db.models import F, Q
from django.db.models.constraints import BaseConstraint
from django.db.models.sql import Query

__all__ = ['ExclusionConstraint']


class ExclusionConstraint(BaseConstraint):
    template = 'CONSTRAINT %(name)s EXCLUDE USING %(index_type)s (%(expressions)s)%(where)s'

    def __init__(self, *, name, expressions, index_type=None, condition=None):
        if index_type and index_type.lower() not in {'gist', 'spgist'}:
            raise ValueError(
                'Exclusion constraints only support GiST or SP-GiST indexes.'
            )
        if not expressions:
            raise ValueError(
                'At least one expression is required to define an exclusion '
                'constraint.'
            )
        if not all(
            isinstance(expr, (list, tuple)) and len(expr) == 2
            for expr in expressions
        ):
            raise ValueError('The expressions must be a list of 2-tuples.')
        if not isinstance(condition, (type(None), Q)):
            raise ValueError(
                'ExclusionConstraint.condition must be a Q instance.'
            )
        self.expressions = expressions
        self.index_type = index_type or 'GIST'
        self.condition = condition
        super().__init__(name=name)

    def _get_expression_sql(self, compiler, connection, query):
        expressions = []
        for expression, operator in self.expressions:
            if isinstance(expression, str):
                expression = F(expression)
            if isinstance(expression, F):
                expression = expression.resolve_expression(query=query, simple_col=True)
            else:
                expression = expression.resolve_expression(query=query)
            sql, params = expression.as_sql(compiler, connection)
            expressions.append('%s WITH %s' % (sql % params, operator))
        return expressions

    def _get_condition_sql(self, compiler, schema_editor, query):
        if self.condition is None:
            return None
        where = query.build_where(self.condition)
        sql, params = where.as_sql(compiler, schema_editor.connection)
        return sql % tuple(schema_editor.quote_value(p) for p in params)

    def constraint_sql(self, model, schema_editor):
        query = Query(model)
        compiler = query.get_compiler(connection=schema_editor.connection)
        expressions = self._get_expression_sql(compiler, schema_editor.connection, query)
        condition = self._get_condition_sql(compiler, schema_editor, query)
        return self.template % {
            'name': schema_editor.quote_name(self.name),
            'index_type': self.index_type,
            'expressions': ', '.join(expressions),
            'where': ' WHERE (%s)' % condition if condition else '',
        }

    def create_sql(self, model, schema_editor):
        return Statement(
            'ALTER TABLE %(table)s ADD %(constraint)s',
            table=Table(model._meta.db_table, schema_editor.quote_name),
            constraint=self.constraint_sql(model, schema_editor),
        )

    def remove_sql(self, model, schema_editor):
        return schema_editor._delete_constraint_sql(
            schema_editor.sql_delete_check,
            model,
            schema_editor.quote_name(self.name),
        )

    def deconstruct(self):
        path, args, kwargs = super().deconstruct()
        kwargs['expressions'] = self.expressions
        if self.condition is not None:
            kwargs['condition'] = self.condition
        if self.index_type.lower() != 'gist':
            kwargs['index_type'] = self.index_type
        return path, args, kwargs

    def __eq__(self, other):
        return (
            isinstance(other, self.__class__) and
            self.name == other.name and
            self.index_type == other.index_type and
            self.expressions == other.expressions and
            self.condition == other.condition
        )

    def __repr__(self):
        return '<%s: index_type=%s, expressions=%s%s>' % (
            self.__class__.__qualname__,
            self.index_type,
            self.expressions,
            '' if self.condition is None else ', condition=%s' % self.condition,
        )
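
# Declaration sketch (Reservation is a hypothetical model; RangeOperators
# and DateTimeRangeField come from django.contrib.postgres.fields, and the
# btree_gist extension is assumed so the plain `room` column can take part
# in the GiST index):
#
#   class Reservation(models.Model):
#       room = models.IntegerField()
#       timespan = DateTimeRangeField()
#
#       class Meta:
#           constraints = [
#               ExclusionConstraint(
#                   name='exclude_overlapping_reservations',
#                   expressions=[
#                       ('timespan', RangeOperators.OVERLAPS),
#                       ('room', RangeOperators.EQUAL),
#                   ],
#               ),
#           ]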
eb2c2e96dd3fcfa97139a4a385a1ab8175769378c69e0162062ec5cdf0a1919d
import hashlib import json import os import posixpath import re import warnings from urllib.parse import unquote, urldefrag, urlsplit, urlunsplit from django.conf import settings from django.contrib.staticfiles.utils import check_settings, matches_patterns from django.core.cache import ( InvalidCacheBackendError, cache as default_cache, caches, ) from django.core.exceptions import ImproperlyConfigured from django.core.files.base import ContentFile from django.core.files.storage import FileSystemStorage, get_storage_class from django.utils.deprecation import RemovedInDjango31Warning from django.utils.functional import LazyObject class StaticFilesStorage(FileSystemStorage): """ Standard file system storage for static files. The defaults for ``location`` and ``base_url`` are ``STATIC_ROOT`` and ``STATIC_URL``. """ def __init__(self, location=None, base_url=None, *args, **kwargs): if location is None: location = settings.STATIC_ROOT if base_url is None: base_url = settings.STATIC_URL check_settings(base_url) super().__init__(location, base_url, *args, **kwargs) # FileSystemStorage fallbacks to MEDIA_ROOT when location # is empty, so we restore the empty value. if not location: self.base_location = None self.location = None def path(self, name): if not self.location: raise ImproperlyConfigured("You're using the staticfiles app " "without having set the STATIC_ROOT " "setting to a filesystem path.") return super().path(name) class HashedFilesMixin: default_template = """url("%s")""" max_post_process_passes = 5 patterns = ( ("*.css", ( r"""(url\(['"]{0,1}\s*(.*?)["']{0,1}\))""", (r"""(@import\s*["']\s*(.*?)["'])""", """@import url("%s")"""), )), ) keep_intermediate_files = True def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self._patterns = {} self.hashed_files = {} for extension, patterns in self.patterns: for pattern in patterns: if isinstance(pattern, (tuple, list)): pattern, template = pattern else: template = self.default_template compiled = re.compile(pattern, re.IGNORECASE) self._patterns.setdefault(extension, []).append((compiled, template)) def file_hash(self, name, content=None): """ Return a hash of the file with the given name and optional content. """ if content is None: return None md5 = hashlib.md5() for chunk in content.chunks(): md5.update(chunk) return md5.hexdigest()[:12] def hashed_name(self, name, content=None, filename=None): # `filename` is the name of file to hash if `content` isn't given. # `name` is the base name to construct the new hashed filename from. parsed_name = urlsplit(unquote(name)) clean_name = parsed_name.path.strip() filename = (filename and urlsplit(unquote(filename)).path.strip()) or clean_name opened = content is None if opened: if not self.exists(filename): raise ValueError("The file '%s' could not be found with %r." % (filename, self)) try: content = self.open(filename) except OSError: # Handle directory paths and fragments return name try: file_hash = self.file_hash(clean_name, content) finally: if opened: content.close() path, filename = os.path.split(clean_name) root, ext = os.path.splitext(filename) if file_hash is not None: file_hash = ".%s" % file_hash hashed_name = os.path.join(path, "%s%s%s" % (root, file_hash, ext)) unparsed_name = list(parsed_name) unparsed_name[2] = hashed_name # Special casing for a @font-face hack, like url(myfont.eot?#iefix") # http://www.fontspring.com/blog/the-new-bulletproof-font-face-syntax if '?#' in name and not unparsed_name[3]: unparsed_name[2] += '?' 
return urlunsplit(unparsed_name) def _url(self, hashed_name_func, name, force=False, hashed_files=None): """ Return the non-hashed URL in DEBUG mode. """ if settings.DEBUG and not force: hashed_name, fragment = name, '' else: clean_name, fragment = urldefrag(name) if urlsplit(clean_name).path.endswith('/'): # don't hash paths hashed_name = name else: args = (clean_name,) if hashed_files is not None: args += (hashed_files,) hashed_name = hashed_name_func(*args) final_url = super().url(hashed_name) # Special casing for a @font-face hack, like url(myfont.eot?#iefix") # http://www.fontspring.com/blog/the-new-bulletproof-font-face-syntax query_fragment = '?#' in name # [sic!] if fragment or query_fragment: urlparts = list(urlsplit(final_url)) if fragment and not urlparts[4]: urlparts[4] = fragment if query_fragment and not urlparts[3]: urlparts[2] += '?' final_url = urlunsplit(urlparts) return unquote(final_url) def url(self, name, force=False): """ Return the non-hashed URL in DEBUG mode. """ return self._url(self.stored_name, name, force) def url_converter(self, name, hashed_files, template=None): """ Return the custom URL converter for the given file name. """ if template is None: template = self.default_template def converter(matchobj): """ Convert the matched URL to a normalized and hashed URL. This requires figuring out which files the matched URL resolves to and calling the url() method of the storage. """ matched, url = matchobj.groups() # Ignore absolute/protocol-relative and data-uri URLs. if re.match(r'^[a-z]+:', url): return matched # Ignore absolute URLs that don't point to a static file (dynamic # CSS / JS?). Note that STATIC_URL cannot be empty. if url.startswith('/') and not url.startswith(settings.STATIC_URL): return matched # Strip off the fragment so a path-like fragment won't interfere. url_path, fragment = urldefrag(url) if url_path.startswith('/'): # Otherwise the condition above would have returned prematurely. assert url_path.startswith(settings.STATIC_URL) target_name = url_path[len(settings.STATIC_URL):] else: # We're using the posixpath module to mix paths and URLs conveniently. source_name = name if os.sep == '/' else name.replace(os.sep, '/') target_name = posixpath.join(posixpath.dirname(source_name), url_path) # Determine the hashed name of the target file with the storage backend. hashed_url = self._url( self._stored_name, unquote(target_name), force=True, hashed_files=hashed_files, ) transformed_url = '/'.join(url_path.split('/')[:-1] + hashed_url.split('/')[-1:]) # Restore the fragment that was stripped off earlier. if fragment: transformed_url += ('?#' if '?#' in url else '#') + fragment # Return the hashed version to the file return template % unquote(transformed_url) return converter def post_process(self, paths, dry_run=False, **options): """ Post process the given dictionary of files (called from collectstatic). Processing is actually two separate operations: 1. renaming files to include a hash of their content for cache-busting, and copying those files to the target storage. 2. adjusting files which contain references to other files so they refer to the cache-busting filenames. If either of these are performed on a file, then that file is considered post-processed. """ # don't even dare to process the files if we're in dry run mode if dry_run: return # where to store the new paths hashed_files = {} # build a list of adjustable files adjustable_paths = [ path for path in paths if matches_patterns(path, self._patterns) ] # Do a single pass first. 
Post-process all files once, then repeat for # adjustable files. for name, hashed_name, processed, _ in self._post_process(paths, adjustable_paths, hashed_files): yield name, hashed_name, processed paths = {path: paths[path] for path in adjustable_paths} for i in range(self.max_post_process_passes): substitutions = False for name, hashed_name, processed, subst in self._post_process(paths, adjustable_paths, hashed_files): yield name, hashed_name, processed substitutions = substitutions or subst if not substitutions: break if substitutions: yield 'All', None, RuntimeError('Max post-process passes exceeded.') # Store the processed paths self.hashed_files.update(hashed_files) def _post_process(self, paths, adjustable_paths, hashed_files): # Sort the files by directory level def path_level(name): return len(name.split(os.sep)) for name in sorted(paths, key=path_level, reverse=True): substitutions = True # use the original, local file, not the copied-but-unprocessed # file, which might be somewhere far away, like S3 storage, path = paths[name] with storage.open(path) as original_file: cleaned_name = self.clean_name(name) hash_key = self.hash_key(cleaned_name) # generate the hash with the original content, even for # adjustable files. if hash_key not in hashed_files: hashed_name = self.hashed_name(name, original_file) else: hashed_name = hashed_files[hash_key] # then get the original's file content.. if hasattr(original_file, 'seek'): original_file.seek(0) hashed_file_exists = self.exists(hashed_name) processed = False # ..to apply each replacement pattern to the content if name in adjustable_paths: old_hashed_name = hashed_name content = original_file.read().decode(settings.FILE_CHARSET) for extension, patterns in self._patterns.items(): if matches_patterns(path, (extension,)): for pattern, template in patterns: converter = self.url_converter(name, hashed_files, template) try: content = pattern.sub(converter, content) except ValueError as exc: yield name, None, exc, False if hashed_file_exists: self.delete(hashed_name) # then save the processed result content_file = ContentFile(content.encode()) if self.keep_intermediate_files: # Save intermediate file for reference self._save(hashed_name, content_file) hashed_name = self.hashed_name(name, content_file) if self.exists(hashed_name): self.delete(hashed_name) saved_name = self._save(hashed_name, content_file) hashed_name = self.clean_name(saved_name) # If the file hash stayed the same, this file didn't change if old_hashed_name == hashed_name: substitutions = False processed = True if not processed: # or handle the case in which neither processing nor # a change to the original file happened if not hashed_file_exists: processed = True saved_name = self._save(hashed_name, original_file) hashed_name = self.clean_name(saved_name) # and then set the cache accordingly hashed_files[hash_key] = hashed_name yield name, hashed_name, processed, substitutions def clean_name(self, name): return name.replace('\\', '/') def hash_key(self, name): return name def _stored_name(self, name, hashed_files): # Normalize the path to avoid multiple names for the same file like # ../foo/bar.css and ../foo/../foo/bar.css which normalize to the same # path. 
name = posixpath.normpath(name) cleaned_name = self.clean_name(name) hash_key = self.hash_key(cleaned_name) cache_name = hashed_files.get(hash_key) if cache_name is None: cache_name = self.clean_name(self.hashed_name(name)) return cache_name def stored_name(self, name): cleaned_name = self.clean_name(name) hash_key = self.hash_key(cleaned_name) cache_name = self.hashed_files.get(hash_key) if cache_name: return cache_name # No cached name found, recalculate it from the files. intermediate_name = name for i in range(self.max_post_process_passes + 1): cache_name = self.clean_name( self.hashed_name(name, content=None, filename=intermediate_name) ) if intermediate_name == cache_name: # Store the hashed name if there was a miss. self.hashed_files[hash_key] = cache_name return cache_name else: # Move on to the next intermediate file. intermediate_name = cache_name # If the cache name can't be determined after the max number of passes, # the intermediate files on disk may be corrupt; avoid an infinite loop. raise ValueError("The name '%s' could not be hashed with %r." % (name, self)) class ManifestFilesMixin(HashedFilesMixin): manifest_version = '1.0' # the manifest format standard manifest_name = 'staticfiles.json' manifest_strict = True keep_intermediate_files = False def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.hashed_files = self.load_manifest() def read_manifest(self): try: with self.open(self.manifest_name) as manifest: return manifest.read().decode() except FileNotFoundError: return None def load_manifest(self): content = self.read_manifest() if content is None: return {} try: stored = json.loads(content) except json.JSONDecodeError: pass else: version = stored.get('version') if version == '1.0': return stored.get('paths', {}) raise ValueError("Couldn't load manifest '%s' (version %s)" % (self.manifest_name, self.manifest_version)) def post_process(self, *args, **kwargs): self.hashed_files = {} yield from super().post_process(*args, **kwargs) self.save_manifest() def save_manifest(self): payload = {'paths': self.hashed_files, 'version': self.manifest_version} if self.exists(self.manifest_name): self.delete(self.manifest_name) contents = json.dumps(payload).encode() self._save(self.manifest_name, ContentFile(contents)) def stored_name(self, name): parsed_name = urlsplit(unquote(name)) clean_name = parsed_name.path.strip() hash_key = self.hash_key(clean_name) cache_name = self.hashed_files.get(hash_key) if cache_name is None: if self.manifest_strict: raise ValueError("Missing staticfiles manifest entry for '%s'" % clean_name) cache_name = self.clean_name(self.hashed_name(name)) unparsed_name = list(parsed_name) unparsed_name[2] = cache_name # Special casing for a @font-face hack, like url(myfont.eot?#iefix") # http://www.fontspring.com/blog/the-new-bulletproof-font-face-syntax if '?#' in name and not unparsed_name[3]: unparsed_name[2] += '?' return urlunsplit(unparsed_name) class _MappingCache: """ A small dict-like wrapper for a given cache backend instance. 
""" def __init__(self, cache): self.cache = cache def __setitem__(self, key, value): self.cache.set(key, value) def __getitem__(self, key): value = self.cache.get(key) if value is None: raise KeyError("Couldn't find a file name '%s'" % key) return value def clear(self): self.cache.clear() def update(self, data): self.cache.set_many(data) def get(self, key, default=None): try: return self[key] except KeyError: return default class CachedFilesMixin(HashedFilesMixin): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) try: self.hashed_files = _MappingCache(caches['staticfiles']) except InvalidCacheBackendError: # Use the default backend self.hashed_files = _MappingCache(default_cache) def hash_key(self, name): key = hashlib.md5(self.clean_name(name).encode()).hexdigest() return 'staticfiles:%s' % key class CachedStaticFilesStorage(CachedFilesMixin, StaticFilesStorage): """ A static file system storage backend which also saves hashed copies of the files it saves. """ def __init__(self, *args, **kwargs): warnings.warn( 'CachedStaticFilesStorage is deprecated in favor of ' 'ManifestStaticFilesStorage.', RemovedInDjango31Warning, stacklevel=2, ) super().__init__(*args, **kwargs) class ManifestStaticFilesStorage(ManifestFilesMixin, StaticFilesStorage): """ A static file system storage backend which also saves hashed copies of the files it saves. """ pass class ConfiguredStorage(LazyObject): def _setup(self): self._wrapped = get_storage_class(settings.STATICFILES_STORAGE)() staticfiles_storage = ConfiguredStorage()
b54b6cae2c8d6deafd56d347b1593caa044b465758479b6c850549d0584e0294
import json from django.contrib.postgres import lookups from django.contrib.postgres.forms import SimpleArrayField from django.contrib.postgres.validators import ArrayMaxLengthValidator from django.core import checks, exceptions from django.db.models import Field, IntegerField, Transform from django.db.models.lookups import Exact, In from django.utils.translation import gettext_lazy as _ from ..utils import prefix_validation_error from .mixins import CheckFieldDefaultMixin from .utils import AttributeSetter __all__ = ['ArrayField'] class ArrayField(CheckFieldDefaultMixin, Field): empty_strings_allowed = False default_error_messages = { 'item_invalid': _('Item %(nth)s in the array did not validate:'), 'nested_array_mismatch': _('Nested arrays must have the same length.'), } _default_hint = ('list', '[]') def __init__(self, base_field, size=None, **kwargs): self.base_field = base_field self.size = size if self.size: self.default_validators = [*self.default_validators, ArrayMaxLengthValidator(self.size)] # For performance, only add a from_db_value() method if the base field # implements it. if hasattr(self.base_field, 'from_db_value'): self.from_db_value = self._from_db_value super().__init__(**kwargs) @property def model(self): try: return self.__dict__['model'] except KeyError: raise AttributeError("'%s' object has no attribute 'model'" % self.__class__.__name__) @model.setter def model(self, model): self.__dict__['model'] = model self.base_field.model = model def check(self, **kwargs): errors = super().check(**kwargs) if self.base_field.remote_field: errors.append( checks.Error( 'Base field for array cannot be a related field.', obj=self, id='postgres.E002' ) ) else: # Remove the field name checks as they are not needed here. base_errors = self.base_field.check() if base_errors: messages = '\n '.join('%s (%s)' % (error.msg, error.id) for error in base_errors) errors.append( checks.Error( 'Base field for array has errors:\n %s' % messages, obj=self, id='postgres.E001' ) ) return errors def set_attributes_from_name(self, name): super().set_attributes_from_name(name) self.base_field.set_attributes_from_name(name) @property def description(self): return 'Array of %s' % self.base_field.description def db_type(self, connection): size = self.size or '' return '%s[%s]' % (self.base_field.db_type(connection), size) def get_placeholder(self, value, compiler, connection): return '%s::{}'.format(self.db_type(connection)) def get_db_prep_value(self, value, connection, prepared=False): if isinstance(value, (list, tuple)): return [self.base_field.get_db_prep_value(i, connection, prepared=False) for i in value] return value def deconstruct(self): name, path, args, kwargs = super().deconstruct() if path == 'django.contrib.postgres.fields.array.ArrayField': path = 'django.contrib.postgres.fields.ArrayField' kwargs.update({ 'base_field': self.base_field.clone(), 'size': self.size, }) return name, path, args, kwargs def to_python(self, value): if isinstance(value, str): # Assume we're deserializing vals = json.loads(value) value = [self.base_field.to_python(val) for val in vals] return value def _from_db_value(self, value, expression, connection): if value is None: return value return [ self.base_field.from_db_value(item, expression, connection) for item in value ] def value_to_string(self, obj): values = [] vals = self.value_from_object(obj) base_field = self.base_field for val in vals: if val is None: values.append(None) else: obj = AttributeSetter(base_field.attname, val) 
values.append(base_field.value_to_string(obj)) return json.dumps(values) def get_transform(self, name): transform = super().get_transform(name) if transform: return transform if '_' not in name: try: index = int(name) except ValueError: pass else: index += 1 # postgres uses 1-indexing return IndexTransformFactory(index, self.base_field) try: start, end = name.split('_') start = int(start) + 1 end = int(end) # don't add one here because postgres slices are weird except ValueError: pass else: return SliceTransformFactory(start, end) def validate(self, value, model_instance): super().validate(value, model_instance) for index, part in enumerate(value): try: self.base_field.validate(part, model_instance) except exceptions.ValidationError as error: raise prefix_validation_error( error, prefix=self.error_messages['item_invalid'], code='item_invalid', params={'nth': index + 1}, ) if isinstance(self.base_field, ArrayField): if len({len(i) for i in value}) > 1: raise exceptions.ValidationError( self.error_messages['nested_array_mismatch'], code='nested_array_mismatch', ) def run_validators(self, value): super().run_validators(value) for index, part in enumerate(value): try: self.base_field.run_validators(part) except exceptions.ValidationError as error: raise prefix_validation_error( error, prefix=self.error_messages['item_invalid'], code='item_invalid', params={'nth': index + 1}, ) def formfield(self, **kwargs): return super().formfield(**{ 'form_class': SimpleArrayField, 'base_field': self.base_field.formfield(), 'max_length': self.size, **kwargs, }) @ArrayField.register_lookup class ArrayContains(lookups.DataContains): def as_sql(self, qn, connection): sql, params = super().as_sql(qn, connection) sql = '%s::%s' % (sql, self.lhs.output_field.db_type(connection)) return sql, params @ArrayField.register_lookup class ArrayContainedBy(lookups.ContainedBy): def as_sql(self, qn, connection): sql, params = super().as_sql(qn, connection) sql = '%s::%s' % (sql, self.lhs.output_field.db_type(connection)) return sql, params @ArrayField.register_lookup class ArrayExact(Exact): def as_sql(self, qn, connection): sql, params = super().as_sql(qn, connection) sql = '%s::%s' % (sql, self.lhs.output_field.db_type(connection)) return sql, params @ArrayField.register_lookup class ArrayOverlap(lookups.Overlap): def as_sql(self, qn, connection): sql, params = super().as_sql(qn, connection) sql = '%s::%s' % (sql, self.lhs.output_field.db_type(connection)) return sql, params @ArrayField.register_lookup class ArrayLenTransform(Transform): lookup_name = 'len' output_field = IntegerField() def as_sql(self, compiler, connection): lhs, params = compiler.compile(self.lhs) # Distinguish NULL and empty arrays return ( 'CASE WHEN %(lhs)s IS NULL THEN NULL ELSE ' 'coalesce(array_length(%(lhs)s, 1), 0) END' ) % {'lhs': lhs}, params @ArrayField.register_lookup class ArrayInLookup(In): def get_prep_lookup(self): values = super().get_prep_lookup() if hasattr(values, 'resolve_expression'): return values # In.process_rhs() expects values to be hashable, so convert lists # to tuples. 
prepared_values = [] for value in values: if hasattr(value, 'resolve_expression'): prepared_values.append(value) else: prepared_values.append(tuple(value)) return prepared_values class IndexTransform(Transform): def __init__(self, index, base_field, *args, **kwargs): super().__init__(*args, **kwargs) self.index = index self.base_field = base_field def as_sql(self, compiler, connection): lhs, params = compiler.compile(self.lhs) return '%s[%%s]' % lhs, params + [self.index] @property def output_field(self): return self.base_field class IndexTransformFactory: def __init__(self, index, base_field): self.index = index self.base_field = base_field def __call__(self, *args, **kwargs): return IndexTransform(self.index, self.base_field, *args, **kwargs) class SliceTransform(Transform): def __init__(self, start, end, *args, **kwargs): super().__init__(*args, **kwargs) self.start = start self.end = end def as_sql(self, compiler, connection): lhs, params = compiler.compile(self.lhs) return '%s[%%s:%%s]' % lhs, params + [self.start, self.end] class SliceTransformFactory: def __init__(self, start, end): self.start = start self.end = end def __call__(self, *args, **kwargs): return SliceTransform(self.start, self.end, *args, **kwargs)
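
# Query sketch for the transforms above (Post.tags is a hypothetical
# ArrayField(models.CharField(max_length=50)) model field):
#
#   Post.objects.filter(tags__len=3)           # array_length(tags, 1) = 3
#   Post.objects.filter(tags__0='django')      # tags[1] -- SQL arrays are 1-indexed
#   Post.objects.filter(tags__0_2=['a', 'b'])  # tags[1:2] slice comparison
#   Post.objects.filter(tags__contains=['a'])  # @> with an array cast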
fcb628ed0c1ed203c627bf357f72bd027c2e8525fcae49a5085e34ac760540a2
import datetime import json from psycopg2.extras import DateRange, DateTimeTZRange, NumericRange, Range from django.contrib.postgres import forms, lookups from django.db import models from .utils import AttributeSetter __all__ = [ 'RangeField', 'IntegerRangeField', 'BigIntegerRangeField', 'DecimalRangeField', 'DateTimeRangeField', 'DateRangeField', 'FloatRangeField', 'RangeBoundary', 'RangeOperators', ] class RangeBoundary(models.Expression): """A class that represents range boundaries.""" def __init__(self, inclusive_lower=True, inclusive_upper=False): self.lower = '[' if inclusive_lower else '(' self.upper = ']' if inclusive_upper else ')' def as_sql(self, compiler, connection): return "'%s%s'" % (self.lower, self.upper), [] class RangeOperators: # https://www.postgresql.org/docs/current/functions-range.html#RANGE-OPERATORS-TABLE EQUAL = '=' NOT_EQUAL = '<>' CONTAINS = '@>' CONTAINED_BY = '<@' OVERLAPS = '&&' FULLY_LT = '<<' FULLY_GT = '>>' NOT_LT = '&>' NOT_GT = '&<' ADJACENT_TO = '-|-' class RangeField(models.Field): empty_strings_allowed = False def __init__(self, *args, **kwargs): # Initializing base_field here ensures that its model matches the model for self. if hasattr(self, 'base_field'): self.base_field = self.base_field() super().__init__(*args, **kwargs) @property def model(self): try: return self.__dict__['model'] except KeyError: raise AttributeError("'%s' object has no attribute 'model'" % self.__class__.__name__) @model.setter def model(self, model): self.__dict__['model'] = model self.base_field.model = model def get_prep_value(self, value): if value is None: return None elif isinstance(value, Range): return value elif isinstance(value, (list, tuple)): return self.range_type(value[0], value[1]) return value def to_python(self, value): if isinstance(value, str): # Assume we're deserializing vals = json.loads(value) for end in ('lower', 'upper'): if end in vals: vals[end] = self.base_field.to_python(vals[end]) value = self.range_type(**vals) elif isinstance(value, (list, tuple)): value = self.range_type(value[0], value[1]) return value def set_attributes_from_name(self, name): super().set_attributes_from_name(name) self.base_field.set_attributes_from_name(name) def value_to_string(self, obj): value = self.value_from_object(obj) if value is None: return None if value.isempty: return json.dumps({"empty": True}) base_field = self.base_field result = {"bounds": value._bounds} for end in ('lower', 'upper'): val = getattr(value, end) if val is None: result[end] = None else: obj = AttributeSetter(base_field.attname, val) result[end] = base_field.value_to_string(obj) return json.dumps(result) def formfield(self, **kwargs): kwargs.setdefault('form_class', self.form_field) return super().formfield(**kwargs) class IntegerRangeField(RangeField): base_field = models.IntegerField range_type = NumericRange form_field = forms.IntegerRangeField def db_type(self, connection): return 'int4range' class BigIntegerRangeField(RangeField): base_field = models.BigIntegerField range_type = NumericRange form_field = forms.IntegerRangeField def db_type(self, connection): return 'int8range' class DecimalRangeField(RangeField): base_field = models.DecimalField range_type = NumericRange form_field = forms.DecimalRangeField def db_type(self, connection): return 'numrange' class FloatRangeField(RangeField): system_check_deprecated_details = { 'msg': ( 'FloatRangeField is deprecated and will be removed in Django 3.1.' 
), 'hint': 'Use DecimalRangeField instead.', 'id': 'fields.W902', } base_field = models.FloatField range_type = NumericRange form_field = forms.FloatRangeField def db_type(self, connection): return 'numrange' class DateTimeRangeField(RangeField): base_field = models.DateTimeField range_type = DateTimeTZRange form_field = forms.DateTimeRangeField def db_type(self, connection): return 'tstzrange' class DateRangeField(RangeField): base_field = models.DateField range_type = DateRange form_field = forms.DateRangeField def db_type(self, connection): return 'daterange' RangeField.register_lookup(lookups.DataContains) RangeField.register_lookup(lookups.ContainedBy) RangeField.register_lookup(lookups.Overlap) class DateTimeRangeContains(lookups.PostgresSimpleLookup): """ Lookup for Date/DateTimeRange containment to cast the rhs to the correct type. """ lookup_name = 'contains' operator = RangeOperators.CONTAINS def process_rhs(self, compiler, connection): # Transform rhs value for db lookup. if isinstance(self.rhs, datetime.date): output_field = models.DateTimeField() if isinstance(self.rhs, datetime.datetime) else models.DateField() value = models.Value(self.rhs, output_field=output_field) self.rhs = value.resolve_expression(compiler.query) return super().process_rhs(compiler, connection) def as_sql(self, compiler, connection): sql, params = super().as_sql(compiler, connection) # Cast the rhs if needed. cast_sql = '' if ( isinstance(self.rhs, models.Expression) and self.rhs._output_field_or_none and # Skip cast if rhs has a matching range type. not isinstance(self.rhs._output_field_or_none, self.lhs.output_field.__class__) ): cast_internal_type = self.lhs.output_field.base_field.get_internal_type() cast_sql = '::{}'.format(connection.data_types.get(cast_internal_type)) return '%s%s' % (sql, cast_sql), params DateRangeField.register_lookup(DateTimeRangeContains) DateTimeRangeField.register_lookup(DateTimeRangeContains) class RangeContainedBy(lookups.PostgresSimpleLookup): lookup_name = 'contained_by' type_mapping = { 'integer': 'int4range', 'bigint': 'int8range', 'double precision': 'numrange', 'date': 'daterange', 'timestamp with time zone': 'tstzrange', } operator = RangeOperators.CONTAINED_BY def process_rhs(self, compiler, connection): rhs, rhs_params = super().process_rhs(compiler, connection) cast_type = self.type_mapping[self.lhs.output_field.db_type(connection)] return '%s::%s' % (rhs, cast_type), rhs_params def process_lhs(self, compiler, connection): lhs, lhs_params = super().process_lhs(compiler, connection) if isinstance(self.lhs.output_field, models.FloatField): lhs = '%s::numeric' % lhs return lhs, lhs_params def get_prep_lookup(self): return RangeField().get_prep_value(self.rhs) models.DateField.register_lookup(RangeContainedBy) models.DateTimeField.register_lookup(RangeContainedBy) models.IntegerField.register_lookup(RangeContainedBy) models.BigIntegerField.register_lookup(RangeContainedBy) models.FloatField.register_lookup(RangeContainedBy) @RangeField.register_lookup class FullyLessThan(lookups.PostgresSimpleLookup): lookup_name = 'fully_lt' operator = RangeOperators.FULLY_LT @RangeField.register_lookup class FullGreaterThan(lookups.PostgresSimpleLookup): lookup_name = 'fully_gt' operator = RangeOperators.FULLY_GT @RangeField.register_lookup class NotLessThan(lookups.PostgresSimpleLookup): lookup_name = 'not_lt' operator = RangeOperators.NOT_LT @RangeField.register_lookup class NotGreaterThan(lookups.PostgresSimpleLookup): lookup_name = 'not_gt' operator = RangeOperators.NOT_GT 
@RangeField.register_lookup class AdjacentToLookup(lookups.PostgresSimpleLookup): lookup_name = 'adjacent_to' operator = RangeOperators.ADJACENT_TO @RangeField.register_lookup class RangeStartsWith(models.Transform): lookup_name = 'startswith' function = 'lower' @property def output_field(self): return self.lhs.output_field.base_field @RangeField.register_lookup class RangeEndsWith(models.Transform): lookup_name = 'endswith' function = 'upper' @property def output_field(self): return self.lhs.output_field.base_field @RangeField.register_lookup class IsEmpty(models.Transform): lookup_name = 'isempty' function = 'isempty' output_field = models.BooleanField()
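
# Lookup sketch (Event.timespan is a hypothetical DateTimeRangeField;
# `now`, `other_range`, and `cutoff` are placeholder values):
#
#   Event.objects.filter(timespan__contains=now)            # @>, rhs cast if needed
#   Event.objects.filter(timespan__overlap=other_range)     # &&
#   Event.objects.filter(timespan__startswith__gte=cutoff)  # lower(timespan) >= cutoff
#   Event.objects.filter(timespan__isempty=True)            # isempty(timespan)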
e0e400e7a6cb2fdcfa6bc9b2ea1d0bced432496e4323ae4cd53ec66ccff4f99e
import json

from django.contrib.postgres import forms, lookups
from django.contrib.postgres.fields.array import ArrayField
from django.core import exceptions
from django.db.models import Field, TextField, Transform
from django.utils.translation import gettext_lazy as _

from .mixins import CheckFieldDefaultMixin

__all__ = ['HStoreField']


class HStoreField(CheckFieldDefaultMixin, Field):
    empty_strings_allowed = False
    description = _('Map of strings to strings/nulls')
    default_error_messages = {
        'not_a_string': _('The value of “%(key)s” is not a string or null.'),
    }
    _default_hint = ('dict', '{}')

    def db_type(self, connection):
        return 'hstore'

    def get_transform(self, name):
        transform = super().get_transform(name)
        if transform:
            return transform
        return KeyTransformFactory(name)

    def validate(self, value, model_instance):
        super().validate(value, model_instance)
        for key, val in value.items():
            if not isinstance(val, str) and val is not None:
                raise exceptions.ValidationError(
                    self.error_messages['not_a_string'],
                    code='not_a_string',
                    params={'key': key},
                )

    def to_python(self, value):
        if isinstance(value, str):
            value = json.loads(value)
        return value

    def value_to_string(self, obj):
        return json.dumps(self.value_from_object(obj))

    def formfield(self, **kwargs):
        return super().formfield(**{
            'form_class': forms.HStoreField,
            **kwargs,
        })

    def get_prep_value(self, value):
        value = super().get_prep_value(value)

        if isinstance(value, dict):
            prep_value = {}
            for key, val in value.items():
                key = str(key)
                if val is not None:
                    val = str(val)
                prep_value[key] = val
            value = prep_value

        if isinstance(value, list):
            value = [str(item) for item in value]

        return value


HStoreField.register_lookup(lookups.DataContains)
HStoreField.register_lookup(lookups.ContainedBy)
HStoreField.register_lookup(lookups.HasKey)
HStoreField.register_lookup(lookups.HasKeys)
HStoreField.register_lookup(lookups.HasAnyKeys)


class KeyTransform(Transform):
    output_field = TextField()

    def __init__(self, key_name, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.key_name = key_name

    def as_sql(self, compiler, connection):
        lhs, params = compiler.compile(self.lhs)
        return '(%s -> %%s)' % lhs, params + [self.key_name]


class KeyTransformFactory:

    def __init__(self, key_name):
        self.key_name = key_name

    def __call__(self, *args, **kwargs):
        return KeyTransform(self.key_name, *args, **kwargs)


@HStoreField.register_lookup
class KeysTransform(Transform):
    lookup_name = 'keys'
    function = 'akeys'
    output_field = ArrayField(TextField())


@HStoreField.register_lookup
class ValuesTransform(Transform):
    lookup_name = 'values'
    function = 'avals'
    output_field = ArrayField(TextField())
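
# Query sketch (Dog.data is a hypothetical HStoreField):
#
#   Dog.objects.filter(data__breed='collie')            # (data -> 'breed') = 'collie'
#   Dog.objects.filter(data__has_key='owner')           # data ? 'owner'
#   Dog.objects.filter(data__keys__contains=['breed'])  # akeys(data) @> ARRAY['breed']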
ebb35879e4eea96a98113ea45aa9658770066015143dec8e0f5150a4c48e84e2
import json from psycopg2.extras import Json from django.contrib.postgres import forms, lookups from django.core import exceptions from django.db.models import ( Field, TextField, Transform, lookups as builtin_lookups, ) from django.utils.translation import gettext_lazy as _ from .mixins import CheckFieldDefaultMixin __all__ = ['JSONField'] class JsonAdapter(Json): """ Customized psycopg2.extras.Json to allow for a custom encoder. """ def __init__(self, adapted, dumps=None, encoder=None): self.encoder = encoder super().__init__(adapted, dumps=dumps) def dumps(self, obj): options = {'cls': self.encoder} if self.encoder else {} return json.dumps(obj, **options) class JSONField(CheckFieldDefaultMixin, Field): empty_strings_allowed = False description = _('A JSON object') default_error_messages = { 'invalid': _("Value must be valid JSON."), } _default_hint = ('dict', '{}') def __init__(self, verbose_name=None, name=None, encoder=None, **kwargs): if encoder and not callable(encoder): raise ValueError("The encoder parameter must be a callable object.") self.encoder = encoder super().__init__(verbose_name, name, **kwargs) def db_type(self, connection): return 'jsonb' def deconstruct(self): name, path, args, kwargs = super().deconstruct() if self.encoder is not None: kwargs['encoder'] = self.encoder return name, path, args, kwargs def get_transform(self, name): transform = super().get_transform(name) if transform: return transform return KeyTransformFactory(name) def get_prep_value(self, value): if value is not None: return JsonAdapter(value, encoder=self.encoder) return value def validate(self, value, model_instance): super().validate(value, model_instance) options = {'cls': self.encoder} if self.encoder else {} try: json.dumps(value, **options) except TypeError: raise exceptions.ValidationError( self.error_messages['invalid'], code='invalid', params={'value': value}, ) def value_to_string(self, obj): return self.value_from_object(obj) def formfield(self, **kwargs): return super().formfield(**{ 'form_class': forms.JSONField, **kwargs, }) JSONField.register_lookup(lookups.DataContains) JSONField.register_lookup(lookups.ContainedBy) JSONField.register_lookup(lookups.HasKey) JSONField.register_lookup(lookups.HasKeys) JSONField.register_lookup(lookups.HasAnyKeys) JSONField.register_lookup(lookups.JSONExact) class KeyTransform(Transform): operator = '->' nested_operator = '#>' def __init__(self, key_name, *args, **kwargs): super().__init__(*args, **kwargs) self.key_name = key_name def as_sql(self, compiler, connection): key_transforms = [self.key_name] previous = self.lhs while isinstance(previous, KeyTransform): key_transforms.insert(0, previous.key_name) previous = previous.lhs lhs, params = compiler.compile(previous) if len(key_transforms) > 1: return '(%s %s %%s)' % (lhs, self.nested_operator), params + [key_transforms] try: lookup = int(self.key_name) except ValueError: lookup = self.key_name return '(%s %s %%s)' % (lhs, self.operator), params + [lookup] class KeyTextTransform(KeyTransform): operator = '->>' nested_operator = '#>>' output_field = TextField() class KeyTransformTextLookupMixin: """ Mixin for combining with a lookup expecting a text lhs from a JSONField key lookup. Make use of the ->> operator instead of casting key values to text and performing the lookup on the resulting representation. 
""" def __init__(self, key_transform, *args, **kwargs): assert isinstance(key_transform, KeyTransform) key_text_transform = KeyTextTransform( key_transform.key_name, *key_transform.source_expressions, **key_transform.extra ) super().__init__(key_text_transform, *args, **kwargs) class KeyTransformIExact(KeyTransformTextLookupMixin, builtin_lookups.IExact): pass class KeyTransformIContains(KeyTransformTextLookupMixin, builtin_lookups.IContains): pass class KeyTransformStartsWith(KeyTransformTextLookupMixin, builtin_lookups.StartsWith): pass class KeyTransformIStartsWith(KeyTransformTextLookupMixin, builtin_lookups.IStartsWith): pass class KeyTransformEndsWith(KeyTransformTextLookupMixin, builtin_lookups.EndsWith): pass class KeyTransformIEndsWith(KeyTransformTextLookupMixin, builtin_lookups.IEndsWith): pass class KeyTransformRegex(KeyTransformTextLookupMixin, builtin_lookups.Regex): pass class KeyTransformIRegex(KeyTransformTextLookupMixin, builtin_lookups.IRegex): pass KeyTransform.register_lookup(KeyTransformIExact) KeyTransform.register_lookup(KeyTransformIContains) KeyTransform.register_lookup(KeyTransformStartsWith) KeyTransform.register_lookup(KeyTransformIStartsWith) KeyTransform.register_lookup(KeyTransformEndsWith) KeyTransform.register_lookup(KeyTransformIEndsWith) KeyTransform.register_lookup(KeyTransformRegex) KeyTransform.register_lookup(KeyTransformIRegex) class KeyTransformFactory: def __init__(self, key_name): self.key_name = key_name def __call__(self, *args, **kwargs): return KeyTransform(self.key_name, *args, **kwargs)
831793a8eacff35817f9b063cc9a9086cf3f2e3793e73bb68e3a4751fdccee65
from django.core.exceptions import FieldDoesNotExist from django.db import connection, migrations, models, transaction from django.db.migrations.migration import Migration from django.db.migrations.operations import CreateModel from django.db.migrations.operations.fields import FieldOperation from django.db.migrations.state import ModelState, ProjectState from django.db.models.fields import NOT_PROVIDED from django.db.transaction import atomic from django.db.utils import IntegrityError from django.test import SimpleTestCase, override_settings, skipUnlessDBFeature from .models import FoodManager, FoodQuerySet, UnicodeModel from .test_base import MigrationTestBase class Mixin: pass class OperationTestBase(MigrationTestBase): """ Common functions to help test operations. """ @classmethod def setUpClass(cls): super().setUpClass() cls._initial_table_names = frozenset(connection.introspection.table_names()) def tearDown(self): self.cleanup_test_tables() super().tearDown() def cleanup_test_tables(self): table_names = frozenset(connection.introspection.table_names()) - self._initial_table_names with connection.schema_editor() as editor: with connection.constraint_checks_disabled(): for table_name in table_names: editor.execute(editor.sql_delete_table % { 'table': editor.quote_name(table_name), }) def apply_operations(self, app_label, project_state, operations, atomic=True): migration = Migration('name', app_label) migration.operations = operations with connection.schema_editor(atomic=atomic) as editor: return migration.apply(project_state, editor) def unapply_operations(self, app_label, project_state, operations, atomic=True): migration = Migration('name', app_label) migration.operations = operations with connection.schema_editor(atomic=atomic) as editor: return migration.unapply(project_state, editor) def make_test_state(self, app_label, operation, **kwargs): """ Makes a test state using set_up_test_model and returns the original state and the state after the migration is applied. """ project_state = self.set_up_test_model(app_label, **kwargs) new_state = project_state.clone() operation.state_forwards(app_label, new_state) return project_state, new_state def set_up_test_model( self, app_label, second_model=False, third_model=False, index=False, multicol_index=False, related_model=False, mti_model=False, proxy_model=False, manager_model=False, unique_together=False, options=False, db_table=None, index_together=False, constraints=None): """ Creates a test model state and database table. 
""" # Make the "current" state model_options = { "swappable": "TEST_SWAP_MODEL", "index_together": [["weight", "pink"]] if index_together else [], "unique_together": [["pink", "weight"]] if unique_together else [], } if options: model_options["permissions"] = [("can_groom", "Can groom")] if db_table: model_options["db_table"] = db_table operations = [migrations.CreateModel( "Pony", [ ("id", models.AutoField(primary_key=True)), ("pink", models.IntegerField(default=3)), ("weight", models.FloatField()), ], options=model_options, )] if index: operations.append(migrations.AddIndex( "Pony", models.Index(fields=["pink"], name="pony_pink_idx") )) if multicol_index: operations.append(migrations.AddIndex( "Pony", models.Index(fields=["pink", "weight"], name="pony_test_idx") )) if constraints: for constraint in constraints: operations.append(migrations.AddConstraint( "Pony", constraint, )) if second_model: operations.append(migrations.CreateModel( "Stable", [ ("id", models.AutoField(primary_key=True)), ] )) if third_model: operations.append(migrations.CreateModel( "Van", [ ("id", models.AutoField(primary_key=True)), ] )) if related_model: operations.append(migrations.CreateModel( "Rider", [ ("id", models.AutoField(primary_key=True)), ("pony", models.ForeignKey("Pony", models.CASCADE)), ("friend", models.ForeignKey("self", models.CASCADE)) ], )) if mti_model: operations.append(migrations.CreateModel( "ShetlandPony", fields=[ ('pony_ptr', models.OneToOneField( 'Pony', models.CASCADE, auto_created=True, parent_link=True, primary_key=True, to_field='id', serialize=False, )), ("cuteness", models.IntegerField(default=1)), ], bases=['%s.Pony' % app_label], )) if proxy_model: operations.append(migrations.CreateModel( "ProxyPony", fields=[], options={"proxy": True}, bases=['%s.Pony' % app_label], )) if manager_model: operations.append(migrations.CreateModel( "Food", fields=[ ("id", models.AutoField(primary_key=True)), ], managers=[ ("food_qs", FoodQuerySet.as_manager()), ("food_mgr", FoodManager("a", "b")), ("food_mgr_kwargs", FoodManager("x", "y", 3, 4)), ] )) return self.apply_operations(app_label, ProjectState(), operations) class OperationTests(OperationTestBase): """ Tests running the operations and making sure they do what they say they do. Each test looks at their state changing, and then their database operation - both forwards and backwards. """ def test_create_model(self): """ Tests the CreateModel operation. Most other tests use this operation as part of setup, so check failures here first. 
""" operation = migrations.CreateModel( "Pony", [ ("id", models.AutoField(primary_key=True)), ("pink", models.IntegerField(default=1)), ], ) self.assertEqual(operation.describe(), "Create model Pony") # Test the state alteration project_state = ProjectState() new_state = project_state.clone() operation.state_forwards("test_crmo", new_state) self.assertEqual(new_state.models["test_crmo", "pony"].name, "Pony") self.assertEqual(len(new_state.models["test_crmo", "pony"].fields), 2) # Test the database alteration self.assertTableNotExists("test_crmo_pony") with connection.schema_editor() as editor: operation.database_forwards("test_crmo", editor, project_state, new_state) self.assertTableExists("test_crmo_pony") # And test reversal with connection.schema_editor() as editor: operation.database_backwards("test_crmo", editor, new_state, project_state) self.assertTableNotExists("test_crmo_pony") # And deconstruction definition = operation.deconstruct() self.assertEqual(definition[0], "CreateModel") self.assertEqual(definition[1], []) self.assertEqual(sorted(definition[2]), ["fields", "name"]) # And default manager not in set operation = migrations.CreateModel("Foo", fields=[], managers=[("objects", models.Manager())]) definition = operation.deconstruct() self.assertNotIn('managers', definition[2]) def test_create_model_with_duplicate_field_name(self): with self.assertRaisesMessage(ValueError, 'Found duplicate value pink in CreateModel fields argument.'): migrations.CreateModel( "Pony", [ ("id", models.AutoField(primary_key=True)), ("pink", models.TextField()), ("pink", models.IntegerField(default=1)), ], ) def test_create_model_with_duplicate_base(self): message = 'Found duplicate value test_crmo.pony in CreateModel bases argument.' with self.assertRaisesMessage(ValueError, message): migrations.CreateModel( "Pony", fields=[], bases=("test_crmo.Pony", "test_crmo.Pony",), ) with self.assertRaisesMessage(ValueError, message): migrations.CreateModel( "Pony", fields=[], bases=("test_crmo.Pony", "test_crmo.pony",), ) message = 'Found duplicate value migrations.unicodemodel in CreateModel bases argument.' with self.assertRaisesMessage(ValueError, message): migrations.CreateModel( "Pony", fields=[], bases=(UnicodeModel, UnicodeModel,), ) with self.assertRaisesMessage(ValueError, message): migrations.CreateModel( "Pony", fields=[], bases=(UnicodeModel, 'migrations.unicodemodel',), ) with self.assertRaisesMessage(ValueError, message): migrations.CreateModel( "Pony", fields=[], bases=(UnicodeModel, 'migrations.UnicodeModel',), ) message = "Found duplicate value <class 'django.db.models.base.Model'> in CreateModel bases argument." with self.assertRaisesMessage(ValueError, message): migrations.CreateModel( "Pony", fields=[], bases=(models.Model, models.Model,), ) message = "Found duplicate value <class 'migrations.test_operations.Mixin'> in CreateModel bases argument." 
    def test_create_model_with_duplicate_base(self):
        message = 'Found duplicate value test_crmo.pony in CreateModel bases argument.'
        with self.assertRaisesMessage(ValueError, message):
            migrations.CreateModel(
                "Pony",
                fields=[],
                bases=("test_crmo.Pony", "test_crmo.Pony",),
            )
        with self.assertRaisesMessage(ValueError, message):
            migrations.CreateModel(
                "Pony",
                fields=[],
                bases=("test_crmo.Pony", "test_crmo.pony",),
            )
        message = 'Found duplicate value migrations.unicodemodel in CreateModel bases argument.'
        with self.assertRaisesMessage(ValueError, message):
            migrations.CreateModel(
                "Pony",
                fields=[],
                bases=(UnicodeModel, UnicodeModel,),
            )
        with self.assertRaisesMessage(ValueError, message):
            migrations.CreateModel(
                "Pony",
                fields=[],
                bases=(UnicodeModel, 'migrations.unicodemodel',),
            )
        with self.assertRaisesMessage(ValueError, message):
            migrations.CreateModel(
                "Pony",
                fields=[],
                bases=(UnicodeModel, 'migrations.UnicodeModel',),
            )
        message = "Found duplicate value <class 'django.db.models.base.Model'> in CreateModel bases argument."
        with self.assertRaisesMessage(ValueError, message):
            migrations.CreateModel(
                "Pony",
                fields=[],
                bases=(models.Model, models.Model,),
            )
        message = "Found duplicate value <class 'migrations.test_operations.Mixin'> in CreateModel bases argument."
        with self.assertRaisesMessage(ValueError, message):
            migrations.CreateModel(
                "Pony",
                fields=[],
                bases=(Mixin, Mixin,),
            )

    def test_create_model_with_duplicate_manager_name(self):
        with self.assertRaisesMessage(ValueError, 'Found duplicate value objects in CreateModel managers argument.'):
            migrations.CreateModel(
                "Pony",
                fields=[],
                managers=[
                    ("objects", models.Manager()),
                    ("objects", models.Manager()),
                ],
            )

    def test_create_model_with_unique_after(self):
        """
        Tests the CreateModel operation directly followed by an
        AlterUniqueTogether (bug #22844 - sqlite remake issues)
        """
        operation1 = migrations.CreateModel(
            "Pony",
            [
                ("id", models.AutoField(primary_key=True)),
                ("pink", models.IntegerField(default=1)),
            ],
        )
        operation2 = migrations.CreateModel(
            "Rider",
            [
                ("id", models.AutoField(primary_key=True)),
                ("number", models.IntegerField(default=1)),
                ("pony", models.ForeignKey("test_crmoua.Pony", models.CASCADE)),
            ],
        )
        operation3 = migrations.AlterUniqueTogether(
            "Rider",
            [
                ("number", "pony"),
            ],
        )
        # Test the database alteration
        project_state = ProjectState()
        self.assertTableNotExists("test_crmoua_pony")
        self.assertTableNotExists("test_crmoua_rider")
        with connection.schema_editor() as editor:
            new_state = project_state.clone()
            operation1.state_forwards("test_crmoua", new_state)
            operation1.database_forwards("test_crmoua", editor, project_state, new_state)
            project_state, new_state = new_state, new_state.clone()
            operation2.state_forwards("test_crmoua", new_state)
            operation2.database_forwards("test_crmoua", editor, project_state, new_state)
            project_state, new_state = new_state, new_state.clone()
            operation3.state_forwards("test_crmoua", new_state)
            operation3.database_forwards("test_crmoua", editor, project_state, new_state)
        self.assertTableExists("test_crmoua_pony")
        self.assertTableExists("test_crmoua_rider")

    def test_create_model_m2m(self):
        """
        Test the creation of a model with a ManyToMany field and the
        auto-created "through" model.
        """
        project_state = self.set_up_test_model("test_crmomm")
        operation = migrations.CreateModel(
            "Stable",
            [
                ("id", models.AutoField(primary_key=True)),
                ("ponies", models.ManyToManyField("Pony", related_name="stables"))
            ]
        )
        # Test the state alteration
        new_state = project_state.clone()
        operation.state_forwards("test_crmomm", new_state)
        # Test the database alteration
        self.assertTableNotExists("test_crmomm_stable_ponies")
        with connection.schema_editor() as editor:
            operation.database_forwards("test_crmomm", editor, project_state, new_state)
        self.assertTableExists("test_crmomm_stable")
        self.assertTableExists("test_crmomm_stable_ponies")
        self.assertColumnNotExists("test_crmomm_stable", "ponies")
        # Make sure the M2M field actually works
        with atomic():
            Pony = new_state.apps.get_model("test_crmomm", "Pony")
            Stable = new_state.apps.get_model("test_crmomm", "Stable")
            stable = Stable.objects.create()
            p1 = Pony.objects.create(pink=False, weight=4.55)
            p2 = Pony.objects.create(pink=True, weight=5.43)
            stable.ponies.add(p1, p2)
            self.assertEqual(stable.ponies.count(), 2)
            stable.ponies.all().delete()
        # And test reversal
        with connection.schema_editor() as editor:
            operation.database_backwards("test_crmomm", editor, new_state, project_state)
        self.assertTableNotExists("test_crmomm_stable")
        self.assertTableNotExists("test_crmomm_stable_ponies")
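    # Editorial note: the auto-created through table asserted above is named
    # "<db_table of the declaring model>_<m2m field name>", hence
    # "test_crmomm_stable_ponies" for Stable.ponies.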
""" project_state = self.set_up_test_model("test_crmoih") # Test the state alteration operation = migrations.CreateModel( "ShetlandPony", [ ('pony_ptr', models.OneToOneField( 'test_crmoih.Pony', models.CASCADE, auto_created=True, primary_key=True, to_field='id', serialize=False, )), ("cuteness", models.IntegerField(default=1)), ], ) new_state = project_state.clone() operation.state_forwards("test_crmoih", new_state) self.assertIn(("test_crmoih", "shetlandpony"), new_state.models) # Test the database alteration self.assertTableNotExists("test_crmoih_shetlandpony") with connection.schema_editor() as editor: operation.database_forwards("test_crmoih", editor, project_state, new_state) self.assertTableExists("test_crmoih_shetlandpony") # And test reversal with connection.schema_editor() as editor: operation.database_backwards("test_crmoih", editor, new_state, project_state) self.assertTableNotExists("test_crmoih_shetlandpony") def test_create_proxy_model(self): """ CreateModel ignores proxy models. """ project_state = self.set_up_test_model("test_crprmo") # Test the state alteration operation = migrations.CreateModel( "ProxyPony", [], options={"proxy": True}, bases=("test_crprmo.Pony",), ) self.assertEqual(operation.describe(), "Create proxy model ProxyPony") new_state = project_state.clone() operation.state_forwards("test_crprmo", new_state) self.assertIn(("test_crprmo", "proxypony"), new_state.models) # Test the database alteration self.assertTableNotExists("test_crprmo_proxypony") self.assertTableExists("test_crprmo_pony") with connection.schema_editor() as editor: operation.database_forwards("test_crprmo", editor, project_state, new_state) self.assertTableNotExists("test_crprmo_proxypony") self.assertTableExists("test_crprmo_pony") # And test reversal with connection.schema_editor() as editor: operation.database_backwards("test_crprmo", editor, new_state, project_state) self.assertTableNotExists("test_crprmo_proxypony") self.assertTableExists("test_crprmo_pony") # And deconstruction definition = operation.deconstruct() self.assertEqual(definition[0], "CreateModel") self.assertEqual(definition[1], []) self.assertEqual(sorted(definition[2]), ["bases", "fields", "name", "options"]) def test_create_unmanaged_model(self): """ CreateModel ignores unmanaged models. 
""" project_state = self.set_up_test_model("test_crummo") # Test the state alteration operation = migrations.CreateModel( "UnmanagedPony", [], options={"proxy": True}, bases=("test_crummo.Pony",), ) self.assertEqual(operation.describe(), "Create proxy model UnmanagedPony") new_state = project_state.clone() operation.state_forwards("test_crummo", new_state) self.assertIn(("test_crummo", "unmanagedpony"), new_state.models) # Test the database alteration self.assertTableNotExists("test_crummo_unmanagedpony") self.assertTableExists("test_crummo_pony") with connection.schema_editor() as editor: operation.database_forwards("test_crummo", editor, project_state, new_state) self.assertTableNotExists("test_crummo_unmanagedpony") self.assertTableExists("test_crummo_pony") # And test reversal with connection.schema_editor() as editor: operation.database_backwards("test_crummo", editor, new_state, project_state) self.assertTableNotExists("test_crummo_unmanagedpony") self.assertTableExists("test_crummo_pony") @skipUnlessDBFeature('supports_table_check_constraints') def test_create_model_with_constraint(self): where = models.Q(pink__gt=2) check_constraint = models.CheckConstraint(check=where, name='test_constraint_pony_pink_gt_2') operation = migrations.CreateModel( "Pony", [ ("id", models.AutoField(primary_key=True)), ("pink", models.IntegerField(default=3)), ], options={'constraints': [check_constraint]}, ) # Test the state alteration project_state = ProjectState() new_state = project_state.clone() operation.state_forwards("test_crmo", new_state) self.assertEqual(len(new_state.models['test_crmo', 'pony'].options['constraints']), 1) # Test database alteration self.assertTableNotExists("test_crmo_pony") with connection.schema_editor() as editor: operation.database_forwards("test_crmo", editor, project_state, new_state) self.assertTableExists("test_crmo_pony") with connection.cursor() as cursor: with self.assertRaises(IntegrityError): cursor.execute("INSERT INTO test_crmo_pony (id, pink) VALUES (1, 1)") # Test reversal with connection.schema_editor() as editor: operation.database_backwards("test_crmo", editor, new_state, project_state) self.assertTableNotExists("test_crmo_pony") # Test deconstruction definition = operation.deconstruct() self.assertEqual(definition[0], "CreateModel") self.assertEqual(definition[1], []) self.assertEqual(definition[2]['options']['constraints'], [check_constraint]) def test_create_model_with_partial_unique_constraint(self): partial_unique_constraint = models.UniqueConstraint( fields=['pink'], condition=models.Q(weight__gt=5), name='test_constraint_pony_pink_for_weight_gt_5_uniq', ) operation = migrations.CreateModel( 'Pony', [ ('id', models.AutoField(primary_key=True)), ('pink', models.IntegerField(default=3)), ('weight', models.FloatField()), ], options={'constraints': [partial_unique_constraint]}, ) # Test the state alteration project_state = ProjectState() new_state = project_state.clone() operation.state_forwards('test_crmo', new_state) self.assertEqual(len(new_state.models['test_crmo', 'pony'].options['constraints']), 1) # Test database alteration self.assertTableNotExists('test_crmo_pony') with connection.schema_editor() as editor: operation.database_forwards('test_crmo', editor, project_state, new_state) self.assertTableExists('test_crmo_pony') # Test constraint works Pony = new_state.apps.get_model('test_crmo', 'Pony') Pony.objects.create(pink=1, weight=4.0) Pony.objects.create(pink=1, weight=4.0) Pony.objects.create(pink=1, weight=6.0) if 
    def test_create_model_with_partial_unique_constraint(self):
        partial_unique_constraint = models.UniqueConstraint(
            fields=['pink'],
            condition=models.Q(weight__gt=5),
            name='test_constraint_pony_pink_for_weight_gt_5_uniq',
        )
        operation = migrations.CreateModel(
            'Pony',
            [
                ('id', models.AutoField(primary_key=True)),
                ('pink', models.IntegerField(default=3)),
                ('weight', models.FloatField()),
            ],
            options={'constraints': [partial_unique_constraint]},
        )
        # Test the state alteration
        project_state = ProjectState()
        new_state = project_state.clone()
        operation.state_forwards('test_crmo', new_state)
        self.assertEqual(len(new_state.models['test_crmo', 'pony'].options['constraints']), 1)
        # Test database alteration
        self.assertTableNotExists('test_crmo_pony')
        with connection.schema_editor() as editor:
            operation.database_forwards('test_crmo', editor, project_state, new_state)
        self.assertTableExists('test_crmo_pony')
        # Test constraint works
        Pony = new_state.apps.get_model('test_crmo', 'Pony')
        Pony.objects.create(pink=1, weight=4.0)
        Pony.objects.create(pink=1, weight=4.0)
        Pony.objects.create(pink=1, weight=6.0)
        if connection.features.supports_partial_indexes:
            with self.assertRaises(IntegrityError):
                Pony.objects.create(pink=1, weight=7.0)
        else:
            Pony.objects.create(pink=1, weight=7.0)
        # Test reversal
        with connection.schema_editor() as editor:
            operation.database_backwards('test_crmo', editor, new_state, project_state)
        self.assertTableNotExists('test_crmo_pony')
        # Test deconstruction
        definition = operation.deconstruct()
        self.assertEqual(definition[0], 'CreateModel')
        self.assertEqual(definition[1], [])
        self.assertEqual(definition[2]['options']['constraints'], [partial_unique_constraint])

    def test_create_model_managers(self):
        """
        The managers on a model are set.
        """
        project_state = self.set_up_test_model("test_cmoma")
        # Test the state alteration
        operation = migrations.CreateModel(
            "Food",
            fields=[
                ("id", models.AutoField(primary_key=True)),
            ],
            managers=[
                ("food_qs", FoodQuerySet.as_manager()),
                ("food_mgr", FoodManager("a", "b")),
                ("food_mgr_kwargs", FoodManager("x", "y", 3, 4)),
            ]
        )
        self.assertEqual(operation.describe(), "Create model Food")
        new_state = project_state.clone()
        operation.state_forwards("test_cmoma", new_state)
        self.assertIn(("test_cmoma", "food"), new_state.models)
        managers = new_state.models["test_cmoma", "food"].managers
        self.assertEqual(managers[0][0], "food_qs")
        self.assertIsInstance(managers[0][1], models.Manager)
        self.assertEqual(managers[1][0], "food_mgr")
        self.assertIsInstance(managers[1][1], FoodManager)
        self.assertEqual(managers[1][1].args, ("a", "b", 1, 2))
        self.assertEqual(managers[2][0], "food_mgr_kwargs")
        self.assertIsInstance(managers[2][1], FoodManager)
        self.assertEqual(managers[2][1].args, ("x", "y", 3, 4))

    def test_delete_model(self):
        """
        Tests the DeleteModel operation.
        """
        project_state = self.set_up_test_model("test_dlmo")
        # Test the state alteration
        operation = migrations.DeleteModel("Pony")
        self.assertEqual(operation.describe(), "Delete model Pony")
        new_state = project_state.clone()
        operation.state_forwards("test_dlmo", new_state)
        self.assertNotIn(("test_dlmo", "pony"), new_state.models)
        # Test the database alteration
        self.assertTableExists("test_dlmo_pony")
        with connection.schema_editor() as editor:
            operation.database_forwards("test_dlmo", editor, project_state, new_state)
        self.assertTableNotExists("test_dlmo_pony")
        # And test reversal
        with connection.schema_editor() as editor:
            operation.database_backwards("test_dlmo", editor, new_state, project_state)
        self.assertTableExists("test_dlmo_pony")
        # And deconstruction
        definition = operation.deconstruct()
        self.assertEqual(definition[0], "DeleteModel")
        self.assertEqual(definition[1], [])
        self.assertEqual(list(definition[2]), ["name"])
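    # Editorial note: DeleteModel is reversible in the test above because the
    # old ProjectState still carries the full model definition, so
    # database_backwards() can recreate the dropped table from it.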
""" project_state = self.set_up_test_model("test_dlprmo", proxy_model=True) # Test the state alteration operation = migrations.DeleteModel("ProxyPony") new_state = project_state.clone() operation.state_forwards("test_dlprmo", new_state) self.assertIn(("test_dlprmo", "proxypony"), project_state.models) self.assertNotIn(("test_dlprmo", "proxypony"), new_state.models) # Test the database alteration self.assertTableExists("test_dlprmo_pony") self.assertTableNotExists("test_dlprmo_proxypony") with connection.schema_editor() as editor: operation.database_forwards("test_dlprmo", editor, project_state, new_state) self.assertTableExists("test_dlprmo_pony") self.assertTableNotExists("test_dlprmo_proxypony") # And test reversal with connection.schema_editor() as editor: operation.database_backwards("test_dlprmo", editor, new_state, project_state) self.assertTableExists("test_dlprmo_pony") self.assertTableNotExists("test_dlprmo_proxypony") def test_delete_mti_model(self): project_state = self.set_up_test_model('test_dlmtimo', mti_model=True) # Test the state alteration operation = migrations.DeleteModel('ShetlandPony') new_state = project_state.clone() operation.state_forwards('test_dlmtimo', new_state) self.assertIn(('test_dlmtimo', 'shetlandpony'), project_state.models) self.assertNotIn(('test_dlmtimo', 'shetlandpony'), new_state.models) # Test the database alteration self.assertTableExists('test_dlmtimo_pony') self.assertTableExists('test_dlmtimo_shetlandpony') self.assertColumnExists('test_dlmtimo_shetlandpony', 'pony_ptr_id') with connection.schema_editor() as editor: operation.database_forwards('test_dlmtimo', editor, project_state, new_state) self.assertTableExists('test_dlmtimo_pony') self.assertTableNotExists('test_dlmtimo_shetlandpony') # And test reversal with connection.schema_editor() as editor: operation.database_backwards('test_dlmtimo', editor, new_state, project_state) self.assertTableExists('test_dlmtimo_pony') self.assertTableExists('test_dlmtimo_shetlandpony') self.assertColumnExists('test_dlmtimo_shetlandpony', 'pony_ptr_id') def test_rename_model(self): """ Tests the RenameModel operation. 
""" project_state = self.set_up_test_model("test_rnmo", related_model=True) # Test the state alteration operation = migrations.RenameModel("Pony", "Horse") self.assertEqual(operation.describe(), "Rename model Pony to Horse") # Test initial state and database self.assertIn(("test_rnmo", "pony"), project_state.models) self.assertNotIn(("test_rnmo", "horse"), project_state.models) self.assertTableExists("test_rnmo_pony") self.assertTableNotExists("test_rnmo_horse") if connection.features.supports_foreign_keys: self.assertFKExists("test_rnmo_rider", ["pony_id"], ("test_rnmo_pony", "id")) self.assertFKNotExists("test_rnmo_rider", ["pony_id"], ("test_rnmo_horse", "id")) # Migrate forwards new_state = project_state.clone() atomic_rename = connection.features.supports_atomic_references_rename new_state = self.apply_operations("test_rnmo", new_state, [operation], atomic=atomic_rename) # Test new state and database self.assertNotIn(("test_rnmo", "pony"), new_state.models) self.assertIn(("test_rnmo", "horse"), new_state.models) # RenameModel also repoints all incoming FKs and M2Ms self.assertEqual("test_rnmo.Horse", new_state.models["test_rnmo", "rider"].fields[1][1].remote_field.model) self.assertTableNotExists("test_rnmo_pony") self.assertTableExists("test_rnmo_horse") if connection.features.supports_foreign_keys: self.assertFKNotExists("test_rnmo_rider", ["pony_id"], ("test_rnmo_pony", "id")) self.assertFKExists("test_rnmo_rider", ["pony_id"], ("test_rnmo_horse", "id")) # Migrate backwards original_state = self.unapply_operations("test_rnmo", project_state, [operation], atomic=atomic_rename) # Test original state and database self.assertIn(("test_rnmo", "pony"), original_state.models) self.assertNotIn(("test_rnmo", "horse"), original_state.models) self.assertEqual("Pony", original_state.models["test_rnmo", "rider"].fields[1][1].remote_field.model) self.assertTableExists("test_rnmo_pony") self.assertTableNotExists("test_rnmo_horse") if connection.features.supports_foreign_keys: self.assertFKExists("test_rnmo_rider", ["pony_id"], ("test_rnmo_pony", "id")) self.assertFKNotExists("test_rnmo_rider", ["pony_id"], ("test_rnmo_horse", "id")) # And deconstruction definition = operation.deconstruct() self.assertEqual(definition[0], "RenameModel") self.assertEqual(definition[1], []) self.assertEqual(definition[2], {'old_name': "Pony", 'new_name': "Horse"}) def test_rename_model_state_forwards(self): """ RenameModel operations shouldn't trigger the caching of rendered apps on state without prior apps. """ state = ProjectState() state.add_model(ModelState('migrations', 'Foo', [])) operation = migrations.RenameModel('Foo', 'Bar') operation.state_forwards('migrations', state) self.assertNotIn('apps', state.__dict__) self.assertNotIn(('migrations', 'foo'), state.models) self.assertIn(('migrations', 'bar'), state.models) # Now with apps cached. apps = state.apps operation = migrations.RenameModel('Bar', 'Foo') operation.state_forwards('migrations', state) self.assertIs(state.apps, apps) self.assertNotIn(('migrations', 'bar'), state.models) self.assertIn(('migrations', 'foo'), state.models) def test_rename_model_with_self_referential_fk(self): """ Tests the RenameModel operation on model with self referential FK. 
""" project_state = self.set_up_test_model("test_rmwsrf", related_model=True) # Test the state alteration operation = migrations.RenameModel("Rider", "HorseRider") self.assertEqual(operation.describe(), "Rename model Rider to HorseRider") new_state = project_state.clone() operation.state_forwards("test_rmwsrf", new_state) self.assertNotIn(("test_rmwsrf", "rider"), new_state.models) self.assertIn(("test_rmwsrf", "horserider"), new_state.models) # Remember, RenameModel also repoints all incoming FKs and M2Ms self.assertEqual( 'self', new_state.models["test_rmwsrf", "horserider"].fields[2][1].remote_field.model ) HorseRider = new_state.apps.get_model('test_rmwsrf', 'horserider') self.assertIs(HorseRider._meta.get_field('horserider').remote_field.model, HorseRider) # Test the database alteration self.assertTableExists("test_rmwsrf_rider") self.assertTableNotExists("test_rmwsrf_horserider") if connection.features.supports_foreign_keys: self.assertFKExists("test_rmwsrf_rider", ["friend_id"], ("test_rmwsrf_rider", "id")) self.assertFKNotExists("test_rmwsrf_rider", ["friend_id"], ("test_rmwsrf_horserider", "id")) atomic_rename = connection.features.supports_atomic_references_rename with connection.schema_editor(atomic=atomic_rename) as editor: operation.database_forwards("test_rmwsrf", editor, project_state, new_state) self.assertTableNotExists("test_rmwsrf_rider") self.assertTableExists("test_rmwsrf_horserider") if connection.features.supports_foreign_keys: self.assertFKNotExists("test_rmwsrf_horserider", ["friend_id"], ("test_rmwsrf_rider", "id")) self.assertFKExists("test_rmwsrf_horserider", ["friend_id"], ("test_rmwsrf_horserider", "id")) # And test reversal with connection.schema_editor(atomic=atomic_rename) as editor: operation.database_backwards("test_rmwsrf", editor, new_state, project_state) self.assertTableExists("test_rmwsrf_rider") self.assertTableNotExists("test_rmwsrf_horserider") if connection.features.supports_foreign_keys: self.assertFKExists("test_rmwsrf_rider", ["friend_id"], ("test_rmwsrf_rider", "id")) self.assertFKNotExists("test_rmwsrf_rider", ["friend_id"], ("test_rmwsrf_horserider", "id")) def test_rename_model_with_superclass_fk(self): """ Tests the RenameModel operation on a model which has a superclass that has a foreign key. 
""" project_state = self.set_up_test_model("test_rmwsc", related_model=True, mti_model=True) # Test the state alteration operation = migrations.RenameModel("ShetlandPony", "LittleHorse") self.assertEqual(operation.describe(), "Rename model ShetlandPony to LittleHorse") new_state = project_state.clone() operation.state_forwards("test_rmwsc", new_state) self.assertNotIn(("test_rmwsc", "shetlandpony"), new_state.models) self.assertIn(("test_rmwsc", "littlehorse"), new_state.models) # RenameModel shouldn't repoint the superclass's relations, only local ones self.assertEqual( project_state.models["test_rmwsc", "rider"].fields[1][1].remote_field.model, new_state.models["test_rmwsc", "rider"].fields[1][1].remote_field.model ) # Before running the migration we have a table for Shetland Pony, not Little Horse self.assertTableExists("test_rmwsc_shetlandpony") self.assertTableNotExists("test_rmwsc_littlehorse") if connection.features.supports_foreign_keys: # and the foreign key on rider points to pony, not shetland pony self.assertFKExists("test_rmwsc_rider", ["pony_id"], ("test_rmwsc_pony", "id")) self.assertFKNotExists("test_rmwsc_rider", ["pony_id"], ("test_rmwsc_shetlandpony", "id")) with connection.schema_editor(atomic=connection.features.supports_atomic_references_rename) as editor: operation.database_forwards("test_rmwsc", editor, project_state, new_state) # Now we have a little horse table, not shetland pony self.assertTableNotExists("test_rmwsc_shetlandpony") self.assertTableExists("test_rmwsc_littlehorse") if connection.features.supports_foreign_keys: # but the Foreign keys still point at pony, not little horse self.assertFKExists("test_rmwsc_rider", ["pony_id"], ("test_rmwsc_pony", "id")) self.assertFKNotExists("test_rmwsc_rider", ["pony_id"], ("test_rmwsc_littlehorse", "id")) def test_rename_model_with_self_referential_m2m(self): app_label = "test_rename_model_with_self_referential_m2m" project_state = self.apply_operations(app_label, ProjectState(), operations=[ migrations.CreateModel("ReflexivePony", fields=[ ("id", models.AutoField(primary_key=True)), ("ponies", models.ManyToManyField("self")), ]), ]) project_state = self.apply_operations(app_label, project_state, operations=[ migrations.RenameModel("ReflexivePony", "ReflexivePony2"), ], atomic=connection.features.supports_atomic_references_rename) Pony = project_state.apps.get_model(app_label, "ReflexivePony2") pony = Pony.objects.create() pony.ponies.add(pony) def test_rename_model_with_m2m(self): app_label = "test_rename_model_with_m2m" project_state = self.apply_operations(app_label, ProjectState(), operations=[ migrations.CreateModel("Rider", fields=[ ("id", models.AutoField(primary_key=True)), ]), migrations.CreateModel("Pony", fields=[ ("id", models.AutoField(primary_key=True)), ("riders", models.ManyToManyField("Rider")), ]), ]) Pony = project_state.apps.get_model(app_label, "Pony") Rider = project_state.apps.get_model(app_label, "Rider") pony = Pony.objects.create() rider = Rider.objects.create() pony.riders.add(rider) project_state = self.apply_operations(app_label, project_state, operations=[ migrations.RenameModel("Pony", "Pony2"), ], atomic=connection.features.supports_atomic_references_rename) Pony = project_state.apps.get_model(app_label, "Pony2") Rider = project_state.apps.get_model(app_label, "Rider") pony = Pony.objects.create() rider = Rider.objects.create() pony.riders.add(rider) self.assertEqual(Pony.objects.count(), 2) self.assertEqual(Rider.objects.count(), 2) 
    def test_rename_model_with_m2m(self):
        app_label = "test_rename_model_with_m2m"
        project_state = self.apply_operations(app_label, ProjectState(), operations=[
            migrations.CreateModel("Rider", fields=[
                ("id", models.AutoField(primary_key=True)),
            ]),
            migrations.CreateModel("Pony", fields=[
                ("id", models.AutoField(primary_key=True)),
                ("riders", models.ManyToManyField("Rider")),
            ]),
        ])
        Pony = project_state.apps.get_model(app_label, "Pony")
        Rider = project_state.apps.get_model(app_label, "Rider")
        pony = Pony.objects.create()
        rider = Rider.objects.create()
        pony.riders.add(rider)

        project_state = self.apply_operations(app_label, project_state, operations=[
            migrations.RenameModel("Pony", "Pony2"),
        ], atomic=connection.features.supports_atomic_references_rename)
        Pony = project_state.apps.get_model(app_label, "Pony2")
        Rider = project_state.apps.get_model(app_label, "Rider")
        pony = Pony.objects.create()
        rider = Rider.objects.create()
        pony.riders.add(rider)
        self.assertEqual(Pony.objects.count(), 2)
        self.assertEqual(Rider.objects.count(), 2)
        self.assertEqual(Pony._meta.get_field('riders').remote_field.through.objects.count(), 2)

    def test_rename_m2m_target_model(self):
        app_label = "test_rename_m2m_target_model"
        project_state = self.apply_operations(app_label, ProjectState(), operations=[
            migrations.CreateModel("Rider", fields=[
                ("id", models.AutoField(primary_key=True)),
            ]),
            migrations.CreateModel("Pony", fields=[
                ("id", models.AutoField(primary_key=True)),
                ("riders", models.ManyToManyField("Rider")),
            ]),
        ])
        Pony = project_state.apps.get_model(app_label, "Pony")
        Rider = project_state.apps.get_model(app_label, "Rider")
        pony = Pony.objects.create()
        rider = Rider.objects.create()
        pony.riders.add(rider)

        project_state = self.apply_operations(app_label, project_state, operations=[
            migrations.RenameModel("Rider", "Rider2"),
        ], atomic=connection.features.supports_atomic_references_rename)
        Pony = project_state.apps.get_model(app_label, "Pony")
        Rider = project_state.apps.get_model(app_label, "Rider2")
        pony = Pony.objects.create()
        rider = Rider.objects.create()
        pony.riders.add(rider)
        self.assertEqual(Pony.objects.count(), 2)
        self.assertEqual(Rider.objects.count(), 2)
        self.assertEqual(Pony._meta.get_field('riders').remote_field.through.objects.count(), 2)

    def test_rename_m2m_through_model(self):
        app_label = "test_rename_through"
        project_state = self.apply_operations(app_label, ProjectState(), operations=[
            migrations.CreateModel("Rider", fields=[
                ("id", models.AutoField(primary_key=True)),
            ]),
            migrations.CreateModel("Pony", fields=[
                ("id", models.AutoField(primary_key=True)),
            ]),
            migrations.CreateModel("PonyRider", fields=[
                ("id", models.AutoField(primary_key=True)),
                ("rider", models.ForeignKey("test_rename_through.Rider", models.CASCADE)),
                ("pony", models.ForeignKey("test_rename_through.Pony", models.CASCADE)),
            ]),
            migrations.AddField(
                "Pony",
                "riders",
                models.ManyToManyField("test_rename_through.Rider", through="test_rename_through.PonyRider"),
            ),
        ])
        Pony = project_state.apps.get_model(app_label, "Pony")
        Rider = project_state.apps.get_model(app_label, "Rider")
        PonyRider = project_state.apps.get_model(app_label, "PonyRider")
        pony = Pony.objects.create()
        rider = Rider.objects.create()
        PonyRider.objects.create(pony=pony, rider=rider)

        project_state = self.apply_operations(app_label, project_state, operations=[
            migrations.RenameModel("PonyRider", "PonyRider2"),
        ])
        Pony = project_state.apps.get_model(app_label, "Pony")
        Rider = project_state.apps.get_model(app_label, "Rider")
        PonyRider = project_state.apps.get_model(app_label, "PonyRider2")
        pony = Pony.objects.first()
        rider = Rider.objects.create()
        PonyRider.objects.create(pony=pony, rider=rider)
        self.assertEqual(Pony.objects.count(), 1)
        self.assertEqual(Rider.objects.count(), 2)
        self.assertEqual(PonyRider.objects.count(), 2)
        self.assertEqual(pony.riders.count(), 2)
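    # Editorial note: as the assertions above show, renaming an explicit
    # through model (PonyRider -> PonyRider2) keeps the existing rows in the
    # through table; only the table itself is renamed.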
    def test_rename_m2m_model_after_rename_field(self):
        """RenameModel renames a many-to-many column after a RenameField."""
        app_label = 'test_rename_multiple'
        project_state = self.apply_operations(app_label, ProjectState(), operations=[
            migrations.CreateModel('Pony', fields=[
                ('id', models.AutoField(primary_key=True)),
                ('name', models.CharField(max_length=20)),
            ]),
            migrations.CreateModel('Rider', fields=[
                ('id', models.AutoField(primary_key=True)),
                ('pony', models.ForeignKey('test_rename_multiple.Pony', models.CASCADE)),
            ]),
            migrations.CreateModel('PonyRider', fields=[
                ('id', models.AutoField(primary_key=True)),
                ('riders', models.ManyToManyField('Rider')),
            ]),
            migrations.RenameField(model_name='pony', old_name='name', new_name='fancy_name'),
            migrations.RenameModel(old_name='Rider', new_name='Jockey'),
        ], atomic=connection.features.supports_atomic_references_rename)
        Pony = project_state.apps.get_model(app_label, 'Pony')
        Jockey = project_state.apps.get_model(app_label, 'Jockey')
        PonyRider = project_state.apps.get_model(app_label, 'PonyRider')
        # No "no such column" error means the column was renamed correctly.
        pony = Pony.objects.create(fancy_name='a good name')
        jockey = Jockey.objects.create(pony=pony)
        ponyrider = PonyRider.objects.create()
        ponyrider.riders.add(jockey)

    def test_add_field(self):
        """
        Tests the AddField operation.
        """
        # Test the state alteration
        operation = migrations.AddField(
            "Pony",
            "height",
            models.FloatField(null=True, default=5),
        )
        self.assertEqual(operation.describe(), "Add field height to Pony")
        project_state, new_state = self.make_test_state("test_adfl", operation)
        self.assertEqual(len(new_state.models["test_adfl", "pony"].fields), 4)
        field = [
            f for n, f in new_state.models["test_adfl", "pony"].fields
            if n == "height"
        ][0]
        self.assertEqual(field.default, 5)
        # Test the database alteration
        self.assertColumnNotExists("test_adfl_pony", "height")
        with connection.schema_editor() as editor:
            operation.database_forwards("test_adfl", editor, project_state, new_state)
        self.assertColumnExists("test_adfl_pony", "height")
        # And test reversal
        with connection.schema_editor() as editor:
            operation.database_backwards("test_adfl", editor, new_state, project_state)
        self.assertColumnNotExists("test_adfl_pony", "height")
        # And deconstruction
        definition = operation.deconstruct()
        self.assertEqual(definition[0], "AddField")
        self.assertEqual(definition[1], [])
        self.assertEqual(sorted(definition[2]), ["field", "model_name", "name"])

    def test_add_charfield(self):
        """
        Tests the AddField operation on CharField.
        """
        project_state = self.set_up_test_model("test_adchfl")

        Pony = project_state.apps.get_model("test_adchfl", "Pony")
        pony = Pony.objects.create(weight=42)

        new_state = self.apply_operations("test_adchfl", project_state, [
            migrations.AddField(
                "Pony",
                "text",
                models.CharField(max_length=10, default="some text"),
            ),
            migrations.AddField(
                "Pony",
                "empty",
                models.CharField(max_length=10, default=""),
            ),
            # If not properly quoted digits would be interpreted as an int.
            migrations.AddField(
                "Pony",
                "digits",
                models.CharField(max_length=10, default="42"),
            ),
            # Manual quoting is fragile and could trip on quotes. Refs #xyz.
            migrations.AddField(
                "Pony",
                "quotes",
                models.CharField(max_length=10, default='"\'"'),
            ),
        ])

        Pony = new_state.apps.get_model("test_adchfl", "Pony")
        pony = Pony.objects.get(pk=pony.pk)
        self.assertEqual(pony.text, "some text")
        self.assertEqual(pony.empty, "")
        self.assertEqual(pony.digits, "42")
        self.assertEqual(pony.quotes, '"\'"')
    def test_add_textfield(self):
        """
        Tests the AddField operation on TextField.
        """
        project_state = self.set_up_test_model("test_adtxtfl")

        Pony = project_state.apps.get_model("test_adtxtfl", "Pony")
        pony = Pony.objects.create(weight=42)

        new_state = self.apply_operations("test_adtxtfl", project_state, [
            migrations.AddField(
                "Pony",
                "text",
                models.TextField(default="some text"),
            ),
            migrations.AddField(
                "Pony",
                "empty",
                models.TextField(default=""),
            ),
            # If not properly quoted digits would be interpreted as an int.
            migrations.AddField(
                "Pony",
                "digits",
                models.TextField(default="42"),
            ),
            # Manual quoting is fragile and could trip on quotes. Refs #xyz.
            migrations.AddField(
                "Pony",
                "quotes",
                models.TextField(default='"\'"'),
            ),
        ])

        Pony = new_state.apps.get_model("test_adtxtfl", "Pony")
        pony = Pony.objects.get(pk=pony.pk)
        self.assertEqual(pony.text, "some text")
        self.assertEqual(pony.empty, "")
        self.assertEqual(pony.digits, "42")
        self.assertEqual(pony.quotes, '"\'"')

    def test_add_binaryfield(self):
        """
        Tests the AddField operation on BinaryField.
        """
        project_state = self.set_up_test_model("test_adbinfl")

        Pony = project_state.apps.get_model("test_adbinfl", "Pony")
        pony = Pony.objects.create(weight=42)

        new_state = self.apply_operations("test_adbinfl", project_state, [
            migrations.AddField(
                "Pony",
                "blob",
                models.BinaryField(default=b"some text"),
            ),
            migrations.AddField(
                "Pony",
                "empty",
                models.BinaryField(default=b""),
            ),
            # If not properly quoted digits would be interpreted as an int.
            migrations.AddField(
                "Pony",
                "digits",
                models.BinaryField(default=b"42"),
            ),
            # Manual quoting is fragile and could trip on quotes. Refs #xyz.
            migrations.AddField(
                "Pony",
                "quotes",
                models.BinaryField(default=b'"\'"'),
            ),
        ])

        Pony = new_state.apps.get_model("test_adbinfl", "Pony")
        pony = Pony.objects.get(pk=pony.pk)
        # SQLite returns buffer/memoryview, cast to bytes for checking.
        self.assertEqual(bytes(pony.blob), b"some text")
        self.assertEqual(bytes(pony.empty), b"")
        self.assertEqual(bytes(pony.digits), b"42")
        self.assertEqual(bytes(pony.quotes), b'"\'"')

    def test_column_name_quoting(self):
        """
        Column names that are SQL keywords shouldn't cause problems when used
        in migrations (#22168).
        """
        project_state = self.set_up_test_model("test_regr22168")
        operation = migrations.AddField(
            "Pony",
            "order",
            models.IntegerField(default=0),
        )
        new_state = project_state.clone()
        operation.state_forwards("test_regr22168", new_state)
        with connection.schema_editor() as editor:
            operation.database_forwards("test_regr22168", editor, project_state, new_state)
        self.assertColumnExists("test_regr22168_pony", "order")

    def test_add_field_preserve_default(self):
        """
        Tests the AddField operation's state alteration when
        preserve_default = False.
        """
        project_state = self.set_up_test_model("test_adflpd")
        # Test the state alteration
        operation = migrations.AddField(
            "Pony",
            "height",
            models.FloatField(null=True, default=4),
            preserve_default=False,
        )
        new_state = project_state.clone()
        operation.state_forwards("test_adflpd", new_state)
        self.assertEqual(len(new_state.models["test_adflpd", "pony"].fields), 4)
        field = [
            f for n, f in new_state.models["test_adflpd", "pony"].fields
            if n == "height"
        ][0]
        self.assertEqual(field.default, NOT_PROVIDED)
        # Test the database alteration
        project_state.apps.get_model("test_adflpd", "pony").objects.create(
            weight=4,
        )
        self.assertColumnNotExists("test_adflpd_pony", "height")
        with connection.schema_editor() as editor:
            operation.database_forwards("test_adflpd", editor, project_state, new_state)
        self.assertColumnExists("test_adflpd_pony", "height")
        # And deconstruction
        definition = operation.deconstruct()
        self.assertEqual(definition[0], "AddField")
        self.assertEqual(definition[1], [])
        self.assertEqual(sorted(definition[2]), ["field", "model_name", "name", "preserve_default"])
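    # Editorial note: with preserve_default=False the default (4) is only used
    # to populate pre-existing rows during the ALTER and is then removed from
    # the field state, which is why the test asserts field.default is
    # NOT_PROVIDED afterwards.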
""" project_state = self.set_up_test_model("test_adflmm", second_model=True) # Test the state alteration operation = migrations.AddField("Pony", "stables", models.ManyToManyField("Stable", related_name="ponies")) new_state = project_state.clone() operation.state_forwards("test_adflmm", new_state) self.assertEqual(len(new_state.models["test_adflmm", "pony"].fields), 4) # Test the database alteration self.assertTableNotExists("test_adflmm_pony_stables") with connection.schema_editor() as editor: operation.database_forwards("test_adflmm", editor, project_state, new_state) self.assertTableExists("test_adflmm_pony_stables") self.assertColumnNotExists("test_adflmm_pony", "stables") # Make sure the M2M field actually works with atomic(): Pony = new_state.apps.get_model("test_adflmm", "Pony") p = Pony.objects.create(pink=False, weight=4.55) p.stables.create() self.assertEqual(p.stables.count(), 1) p.stables.all().delete() # And test reversal with connection.schema_editor() as editor: operation.database_backwards("test_adflmm", editor, new_state, project_state) self.assertTableNotExists("test_adflmm_pony_stables") def test_alter_field_m2m(self): project_state = self.set_up_test_model("test_alflmm", second_model=True) project_state = self.apply_operations("test_alflmm", project_state, operations=[ migrations.AddField("Pony", "stables", models.ManyToManyField("Stable", related_name="ponies")) ]) Pony = project_state.apps.get_model("test_alflmm", "Pony") self.assertFalse(Pony._meta.get_field('stables').blank) project_state = self.apply_operations("test_alflmm", project_state, operations=[ migrations.AlterField( "Pony", "stables", models.ManyToManyField(to="Stable", related_name="ponies", blank=True) ) ]) Pony = project_state.apps.get_model("test_alflmm", "Pony") self.assertTrue(Pony._meta.get_field('stables').blank) def test_repoint_field_m2m(self): project_state = self.set_up_test_model("test_alflmm", second_model=True, third_model=True) project_state = self.apply_operations("test_alflmm", project_state, operations=[ migrations.AddField("Pony", "places", models.ManyToManyField("Stable", related_name="ponies")) ]) Pony = project_state.apps.get_model("test_alflmm", "Pony") project_state = self.apply_operations("test_alflmm", project_state, operations=[ migrations.AlterField("Pony", "places", models.ManyToManyField(to="Van", related_name="ponies")) ]) # Ensure the new field actually works Pony = project_state.apps.get_model("test_alflmm", "Pony") p = Pony.objects.create(pink=False, weight=4.55) p.places.create() self.assertEqual(p.places.count(), 1) p.places.all().delete() def test_remove_field_m2m(self): project_state = self.set_up_test_model("test_rmflmm", second_model=True) project_state = self.apply_operations("test_rmflmm", project_state, operations=[ migrations.AddField("Pony", "stables", models.ManyToManyField("Stable", related_name="ponies")) ]) self.assertTableExists("test_rmflmm_pony_stables") with_field_state = project_state.clone() operations = [migrations.RemoveField("Pony", "stables")] project_state = self.apply_operations("test_rmflmm", project_state, operations=operations) self.assertTableNotExists("test_rmflmm_pony_stables") # And test reversal self.unapply_operations("test_rmflmm", with_field_state, operations=operations) self.assertTableExists("test_rmflmm_pony_stables") def test_remove_field_m2m_with_through(self): project_state = self.set_up_test_model("test_rmflmmwt", second_model=True) self.assertTableNotExists("test_rmflmmwt_ponystables") project_state = 
self.apply_operations("test_rmflmmwt", project_state, operations=[ migrations.CreateModel("PonyStables", fields=[ ("pony", models.ForeignKey('test_rmflmmwt.Pony', models.CASCADE)), ("stable", models.ForeignKey('test_rmflmmwt.Stable', models.CASCADE)), ]), migrations.AddField( "Pony", "stables", models.ManyToManyField("Stable", related_name="ponies", through='test_rmflmmwt.PonyStables') ) ]) self.assertTableExists("test_rmflmmwt_ponystables") operations = [migrations.RemoveField("Pony", "stables"), migrations.DeleteModel("PonyStables")] self.apply_operations("test_rmflmmwt", project_state, operations=operations) def test_remove_field(self): """ Tests the RemoveField operation. """ project_state = self.set_up_test_model("test_rmfl") # Test the state alteration operation = migrations.RemoveField("Pony", "pink") self.assertEqual(operation.describe(), "Remove field pink from Pony") new_state = project_state.clone() operation.state_forwards("test_rmfl", new_state) self.assertEqual(len(new_state.models["test_rmfl", "pony"].fields), 2) # Test the database alteration self.assertColumnExists("test_rmfl_pony", "pink") with connection.schema_editor() as editor: operation.database_forwards("test_rmfl", editor, project_state, new_state) self.assertColumnNotExists("test_rmfl_pony", "pink") # And test reversal with connection.schema_editor() as editor: operation.database_backwards("test_rmfl", editor, new_state, project_state) self.assertColumnExists("test_rmfl_pony", "pink") # And deconstruction definition = operation.deconstruct() self.assertEqual(definition[0], "RemoveField") self.assertEqual(definition[1], []) self.assertEqual(definition[2], {'model_name': "Pony", 'name': 'pink'}) def test_remove_fk(self): """ Tests the RemoveField operation on a foreign key. """ project_state = self.set_up_test_model("test_rfk", related_model=True) self.assertColumnExists("test_rfk_rider", "pony_id") operation = migrations.RemoveField("Rider", "pony") new_state = project_state.clone() operation.state_forwards("test_rfk", new_state) with connection.schema_editor() as editor: operation.database_forwards("test_rfk", editor, project_state, new_state) self.assertColumnNotExists("test_rfk_rider", "pony_id") with connection.schema_editor() as editor: operation.database_backwards("test_rfk", editor, new_state, project_state) self.assertColumnExists("test_rfk_rider", "pony_id") def test_alter_model_table(self): """ Tests the AlterModelTable operation. 
""" project_state = self.set_up_test_model("test_almota") # Test the state alteration operation = migrations.AlterModelTable("Pony", "test_almota_pony_2") self.assertEqual(operation.describe(), "Rename table for Pony to test_almota_pony_2") new_state = project_state.clone() operation.state_forwards("test_almota", new_state) self.assertEqual(new_state.models["test_almota", "pony"].options["db_table"], "test_almota_pony_2") # Test the database alteration self.assertTableExists("test_almota_pony") self.assertTableNotExists("test_almota_pony_2") with connection.schema_editor() as editor: operation.database_forwards("test_almota", editor, project_state, new_state) self.assertTableNotExists("test_almota_pony") self.assertTableExists("test_almota_pony_2") # And test reversal with connection.schema_editor() as editor: operation.database_backwards("test_almota", editor, new_state, project_state) self.assertTableExists("test_almota_pony") self.assertTableNotExists("test_almota_pony_2") # And deconstruction definition = operation.deconstruct() self.assertEqual(definition[0], "AlterModelTable") self.assertEqual(definition[1], []) self.assertEqual(definition[2], {'name': "Pony", 'table': "test_almota_pony_2"}) def test_alter_model_table_none(self): """ Tests the AlterModelTable operation if the table name is set to None. """ operation = migrations.AlterModelTable("Pony", None) self.assertEqual(operation.describe(), "Rename table for Pony to (default)") def test_alter_model_table_noop(self): """ Tests the AlterModelTable operation if the table name is not changed. """ project_state = self.set_up_test_model("test_almota") # Test the state alteration operation = migrations.AlterModelTable("Pony", "test_almota_pony") new_state = project_state.clone() operation.state_forwards("test_almota", new_state) self.assertEqual(new_state.models["test_almota", "pony"].options["db_table"], "test_almota_pony") # Test the database alteration self.assertTableExists("test_almota_pony") with connection.schema_editor() as editor: operation.database_forwards("test_almota", editor, project_state, new_state) self.assertTableExists("test_almota_pony") # And test reversal with connection.schema_editor() as editor: operation.database_backwards("test_almota", editor, new_state, project_state) self.assertTableExists("test_almota_pony") def test_alter_model_table_m2m(self): """ AlterModelTable should rename auto-generated M2M tables. """ app_label = "test_talflmltlm2m" pony_db_table = 'pony_foo' project_state = self.set_up_test_model(app_label, second_model=True, db_table=pony_db_table) # Add the M2M field first_state = project_state.clone() operation = migrations.AddField("Pony", "stables", models.ManyToManyField("Stable")) operation.state_forwards(app_label, first_state) with connection.schema_editor() as editor: operation.database_forwards(app_label, editor, project_state, first_state) original_m2m_table = "%s_%s" % (pony_db_table, "stables") new_m2m_table = "%s_%s" % (app_label, "pony_stables") self.assertTableExists(original_m2m_table) self.assertTableNotExists(new_m2m_table) # Rename the Pony db_table which should also rename the m2m table. 
    def test_alter_model_table_m2m(self):
        """
        AlterModelTable should rename auto-generated M2M tables.
        """
        app_label = "test_talflmltlm2m"
        pony_db_table = 'pony_foo'
        project_state = self.set_up_test_model(app_label, second_model=True, db_table=pony_db_table)
        # Add the M2M field
        first_state = project_state.clone()
        operation = migrations.AddField("Pony", "stables", models.ManyToManyField("Stable"))
        operation.state_forwards(app_label, first_state)
        with connection.schema_editor() as editor:
            operation.database_forwards(app_label, editor, project_state, first_state)
        original_m2m_table = "%s_%s" % (pony_db_table, "stables")
        new_m2m_table = "%s_%s" % (app_label, "pony_stables")
        self.assertTableExists(original_m2m_table)
        self.assertTableNotExists(new_m2m_table)
        # Rename the Pony db_table which should also rename the m2m table.
        second_state = first_state.clone()
        operation = migrations.AlterModelTable(name='pony', table=None)
        operation.state_forwards(app_label, second_state)
        atomic_rename = connection.features.supports_atomic_references_rename
        with connection.schema_editor(atomic=atomic_rename) as editor:
            operation.database_forwards(app_label, editor, first_state, second_state)
        self.assertTableExists(new_m2m_table)
        self.assertTableNotExists(original_m2m_table)
        # And test reversal
        with connection.schema_editor(atomic=atomic_rename) as editor:
            operation.database_backwards(app_label, editor, second_state, first_state)
        self.assertTableExists(original_m2m_table)
        self.assertTableNotExists(new_m2m_table)

    def test_alter_field(self):
        """
        Tests the AlterField operation.
        """
        project_state = self.set_up_test_model("test_alfl")
        # Test the state alteration
        operation = migrations.AlterField("Pony", "pink", models.IntegerField(null=True))
        self.assertEqual(operation.describe(), "Alter field pink on Pony")
        new_state = project_state.clone()
        operation.state_forwards("test_alfl", new_state)
        self.assertIs(project_state.models["test_alfl", "pony"].get_field_by_name("pink").null, False)
        self.assertIs(new_state.models["test_alfl", "pony"].get_field_by_name("pink").null, True)
        # Test the database alteration
        self.assertColumnNotNull("test_alfl_pony", "pink")
        with connection.schema_editor() as editor:
            operation.database_forwards("test_alfl", editor, project_state, new_state)
        self.assertColumnNull("test_alfl_pony", "pink")
        # And test reversal
        with connection.schema_editor() as editor:
            operation.database_backwards("test_alfl", editor, new_state, project_state)
        self.assertColumnNotNull("test_alfl_pony", "pink")
        # And deconstruction
        definition = operation.deconstruct()
        self.assertEqual(definition[0], "AlterField")
        self.assertEqual(definition[1], [])
        self.assertEqual(sorted(definition[2]), ["field", "model_name", "name"])

    def test_alter_field_pk(self):
        """
        Tests the AlterField operation on primary keys (for things like PostgreSQL's SERIAL weirdness)
        """
        project_state = self.set_up_test_model("test_alflpk")
        # Test the state alteration
        operation = migrations.AlterField("Pony", "id", models.IntegerField(primary_key=True))
        new_state = project_state.clone()
        operation.state_forwards("test_alflpk", new_state)
        self.assertIsInstance(project_state.models["test_alflpk", "pony"].get_field_by_name("id"), models.AutoField)
        self.assertIsInstance(new_state.models["test_alflpk", "pony"].get_field_by_name("id"), models.IntegerField)
        # Test the database alteration
        with connection.schema_editor() as editor:
            operation.database_forwards("test_alflpk", editor, project_state, new_state)
        # And test reversal
        with connection.schema_editor() as editor:
            operation.database_backwards("test_alflpk", editor, new_state, project_state)
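    # Editorial note: AlterModelTable(name='pony', table=None), as used in
    # test_alter_model_table_m2m above, reverts the model to the default
    # "<app_label>_<model name>" table name, and the auto-created M2M table is
    # renamed along with it.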
""" project_state = self.set_up_test_model("test_alflpkfk", related_model=True) # Test the state alteration operation = migrations.AlterField("Pony", "id", models.FloatField(primary_key=True)) new_state = project_state.clone() operation.state_forwards("test_alflpkfk", new_state) self.assertIsInstance(project_state.models["test_alflpkfk", "pony"].get_field_by_name("id"), models.AutoField) self.assertIsInstance(new_state.models["test_alflpkfk", "pony"].get_field_by_name("id"), models.FloatField) def assertIdTypeEqualsFkType(): with connection.cursor() as cursor: id_type, id_null = [ (c.type_code, c.null_ok) for c in connection.introspection.get_table_description(cursor, "test_alflpkfk_pony") if c.name == "id" ][0] fk_type, fk_null = [ (c.type_code, c.null_ok) for c in connection.introspection.get_table_description(cursor, "test_alflpkfk_rider") if c.name == "pony_id" ][0] self.assertEqual(id_type, fk_type) self.assertEqual(id_null, fk_null) assertIdTypeEqualsFkType() # Test the database alteration with connection.schema_editor() as editor: operation.database_forwards("test_alflpkfk", editor, project_state, new_state) assertIdTypeEqualsFkType() # And test reversal with connection.schema_editor() as editor: operation.database_backwards("test_alflpkfk", editor, new_state, project_state) assertIdTypeEqualsFkType() def test_alter_field_reloads_state_on_fk_target_changes(self): """ If AlterField doesn't reload state appropriately, the second AlterField crashes on MySQL due to not dropping the PonyRider.pony foreign key constraint before modifying the column. """ app_label = 'alter_alter_field_reloads_state_on_fk_target_changes' project_state = self.apply_operations(app_label, ProjectState(), operations=[ migrations.CreateModel('Rider', fields=[ ('id', models.CharField(primary_key=True, max_length=100)), ]), migrations.CreateModel('Pony', fields=[ ('id', models.CharField(primary_key=True, max_length=100)), ('rider', models.ForeignKey('%s.Rider' % app_label, models.CASCADE)), ]), migrations.CreateModel('PonyRider', fields=[ ('id', models.AutoField(primary_key=True)), ('pony', models.ForeignKey('%s.Pony' % app_label, models.CASCADE)), ]), ]) project_state = self.apply_operations(app_label, project_state, operations=[ migrations.AlterField('Rider', 'id', models.CharField(primary_key=True, max_length=99)), migrations.AlterField('Pony', 'id', models.CharField(primary_key=True, max_length=99)), ]) def test_alter_field_reloads_state_on_fk_with_to_field_target_changes(self): """ If AlterField doesn't reload state appropriately, the second AlterField crashes on MySQL due to not dropping the PonyRider.pony foreign key constraint before modifying the column. 
""" app_label = 'alter_alter_field_reloads_state_on_fk_with_to_field_target_changes' project_state = self.apply_operations(app_label, ProjectState(), operations=[ migrations.CreateModel('Rider', fields=[ ('id', models.CharField(primary_key=True, max_length=100)), ('slug', models.CharField(unique=True, max_length=100)), ]), migrations.CreateModel('Pony', fields=[ ('id', models.CharField(primary_key=True, max_length=100)), ('rider', models.ForeignKey('%s.Rider' % app_label, models.CASCADE, to_field='slug')), ('slug', models.CharField(unique=True, max_length=100)), ]), migrations.CreateModel('PonyRider', fields=[ ('id', models.AutoField(primary_key=True)), ('pony', models.ForeignKey('%s.Pony' % app_label, models.CASCADE, to_field='slug')), ]), ]) project_state = self.apply_operations(app_label, project_state, operations=[ migrations.AlterField('Rider', 'slug', models.CharField(unique=True, max_length=99)), migrations.AlterField('Pony', 'slug', models.CharField(unique=True, max_length=99)), ]) def test_rename_field_reloads_state_on_fk_target_changes(self): """ If RenameField doesn't reload state appropriately, the AlterField crashes on MySQL due to not dropping the PonyRider.pony foreign key constraint before modifying the column. """ app_label = 'alter_rename_field_reloads_state_on_fk_target_changes' project_state = self.apply_operations(app_label, ProjectState(), operations=[ migrations.CreateModel('Rider', fields=[ ('id', models.CharField(primary_key=True, max_length=100)), ]), migrations.CreateModel('Pony', fields=[ ('id', models.CharField(primary_key=True, max_length=100)), ('rider', models.ForeignKey('%s.Rider' % app_label, models.CASCADE)), ]), migrations.CreateModel('PonyRider', fields=[ ('id', models.AutoField(primary_key=True)), ('pony', models.ForeignKey('%s.Pony' % app_label, models.CASCADE)), ]), ]) project_state = self.apply_operations(app_label, project_state, operations=[ migrations.RenameField('Rider', 'id', 'id2'), migrations.AlterField('Pony', 'id', models.CharField(primary_key=True, max_length=99)), ], atomic=connection.features.supports_atomic_references_rename) def test_rename_field(self): """ Tests the RenameField operation. 
""" project_state = self.set_up_test_model("test_rnfl", unique_together=True, index_together=True) # Test the state alteration operation = migrations.RenameField("Pony", "pink", "blue") self.assertEqual(operation.describe(), "Rename field pink on Pony to blue") new_state = project_state.clone() operation.state_forwards("test_rnfl", new_state) self.assertIn("blue", [n for n, f in new_state.models["test_rnfl", "pony"].fields]) self.assertNotIn("pink", [n for n, f in new_state.models["test_rnfl", "pony"].fields]) # Make sure the unique_together has the renamed column too self.assertIn("blue", new_state.models["test_rnfl", "pony"].options['unique_together'][0]) self.assertNotIn("pink", new_state.models["test_rnfl", "pony"].options['unique_together'][0]) # Make sure the index_together has the renamed column too self.assertIn("blue", new_state.models["test_rnfl", "pony"].options['index_together'][0]) self.assertNotIn("pink", new_state.models["test_rnfl", "pony"].options['index_together'][0]) # Test the database alteration self.assertColumnExists("test_rnfl_pony", "pink") self.assertColumnNotExists("test_rnfl_pony", "blue") with connection.schema_editor() as editor: operation.database_forwards("test_rnfl", editor, project_state, new_state) self.assertColumnExists("test_rnfl_pony", "blue") self.assertColumnNotExists("test_rnfl_pony", "pink") # Ensure the unique constraint has been ported over with connection.cursor() as cursor: cursor.execute("INSERT INTO test_rnfl_pony (blue, weight) VALUES (1, 1)") with self.assertRaises(IntegrityError): with atomic(): cursor.execute("INSERT INTO test_rnfl_pony (blue, weight) VALUES (1, 1)") cursor.execute("DELETE FROM test_rnfl_pony") # Ensure the index constraint has been ported over self.assertIndexExists("test_rnfl_pony", ["weight", "blue"]) # And test reversal with connection.schema_editor() as editor: operation.database_backwards("test_rnfl", editor, new_state, project_state) self.assertColumnExists("test_rnfl_pony", "pink") self.assertColumnNotExists("test_rnfl_pony", "blue") # Ensure the index constraint has been reset self.assertIndexExists("test_rnfl_pony", ["weight", "pink"]) # And deconstruction definition = operation.deconstruct() self.assertEqual(definition[0], "RenameField") self.assertEqual(definition[1], []) self.assertEqual(definition[2], {'model_name': "Pony", 'old_name': "pink", 'new_name': "blue"}) def test_rename_missing_field(self): state = ProjectState() state.add_model(ModelState('app', 'model', [])) with self.assertRaisesMessage(FieldDoesNotExist, "app.model has no field named 'field'"): migrations.RenameField('model', 'field', 'new_field').state_forwards('app', state) def test_rename_referenced_field_state_forward(self): state = ProjectState() state.add_model(ModelState('app', 'Model', [ ('id', models.AutoField(primary_key=True)), ('field', models.IntegerField(unique=True)), ])) state.add_model(ModelState('app', 'OtherModel', [ ('id', models.AutoField(primary_key=True)), ('fk', models.ForeignKey('Model', models.CASCADE, to_field='field')), ('fo', models.ForeignObject('Model', models.CASCADE, from_fields=('fk',), to_fields=('field',))), ])) operation = migrations.RenameField('Model', 'field', 'renamed') new_state = state.clone() operation.state_forwards('app', new_state) self.assertEqual(new_state.models['app', 'othermodel'].fields[1][1].remote_field.field_name, 'renamed') self.assertEqual(new_state.models['app', 'othermodel'].fields[1][1].from_fields, ['self']) self.assertEqual(new_state.models['app', 
'othermodel'].fields[1][1].to_fields, ('renamed',)) self.assertEqual(new_state.models['app', 'othermodel'].fields[2][1].from_fields, ('fk',)) self.assertEqual(new_state.models['app', 'othermodel'].fields[2][1].to_fields, ('renamed',)) operation = migrations.RenameField('OtherModel', 'fk', 'renamed_fk') new_state = state.clone() operation.state_forwards('app', new_state) self.assertEqual(new_state.models['app', 'othermodel'].fields[1][1].remote_field.field_name, 'renamed') self.assertEqual(new_state.models['app', 'othermodel'].fields[1][1].from_fields, ('self',)) self.assertEqual(new_state.models['app', 'othermodel'].fields[1][1].to_fields, ('renamed',)) self.assertEqual(new_state.models['app', 'othermodel'].fields[2][1].from_fields, ('renamed_fk',)) self.assertEqual(new_state.models['app', 'othermodel'].fields[2][1].to_fields, ('renamed',)) def test_alter_unique_together(self): """ Tests the AlterUniqueTogether operation. """ project_state = self.set_up_test_model("test_alunto") # Test the state alteration operation = migrations.AlterUniqueTogether("Pony", [("pink", "weight")]) self.assertEqual(operation.describe(), "Alter unique_together for Pony (1 constraint(s))") new_state = project_state.clone() operation.state_forwards("test_alunto", new_state) self.assertEqual(len(project_state.models["test_alunto", "pony"].options.get("unique_together", set())), 0) self.assertEqual(len(new_state.models["test_alunto", "pony"].options.get("unique_together", set())), 1) # Make sure we can insert duplicate rows with connection.cursor() as cursor: cursor.execute("INSERT INTO test_alunto_pony (pink, weight) VALUES (1, 1)") cursor.execute("INSERT INTO test_alunto_pony (pink, weight) VALUES (1, 1)") cursor.execute("DELETE FROM test_alunto_pony") # Test the database alteration with connection.schema_editor() as editor: operation.database_forwards("test_alunto", editor, project_state, new_state) cursor.execute("INSERT INTO test_alunto_pony (pink, weight) VALUES (1, 1)") with self.assertRaises(IntegrityError): with atomic(): cursor.execute("INSERT INTO test_alunto_pony (pink, weight) VALUES (1, 1)") cursor.execute("DELETE FROM test_alunto_pony") # And test reversal with connection.schema_editor() as editor: operation.database_backwards("test_alunto", editor, new_state, project_state) cursor.execute("INSERT INTO test_alunto_pony (pink, weight) VALUES (1, 1)") cursor.execute("INSERT INTO test_alunto_pony (pink, weight) VALUES (1, 1)") cursor.execute("DELETE FROM test_alunto_pony") # Test flat unique_together operation = migrations.AlterUniqueTogether("Pony", ("pink", "weight")) operation.state_forwards("test_alunto", new_state) self.assertEqual(len(new_state.models["test_alunto", "pony"].options.get("unique_together", set())), 1) # And deconstruction definition = operation.deconstruct() self.assertEqual(definition[0], "AlterUniqueTogether") self.assertEqual(definition[1], []) self.assertEqual(definition[2], {'name': "Pony", 'unique_together': {("pink", "weight")}}) def test_alter_unique_together_remove(self): operation = migrations.AlterUniqueTogether("Pony", None) self.assertEqual(operation.describe(), "Alter unique_together for Pony (0 constraint(s))") def test_add_index(self): """ Test the AddIndex operation. """ project_state = self.set_up_test_model("test_adin") msg = ( "Indexes passed to AddIndex operations require a name argument. " "<Index: fields='pink'> doesn't have one." 
) with self.assertRaisesMessage(ValueError, msg): migrations.AddIndex("Pony", models.Index(fields=["pink"])) index = models.Index(fields=["pink"], name="test_adin_pony_pink_idx") operation = migrations.AddIndex("Pony", index) self.assertEqual(operation.describe(), "Create index test_adin_pony_pink_idx on field(s) pink of model Pony") new_state = project_state.clone() operation.state_forwards("test_adin", new_state) # Test the database alteration self.assertEqual(len(new_state.models["test_adin", "pony"].options['indexes']), 1) self.assertIndexNotExists("test_adin_pony", ["pink"]) with connection.schema_editor() as editor: operation.database_forwards("test_adin", editor, project_state, new_state) self.assertIndexExists("test_adin_pony", ["pink"]) # And test reversal with connection.schema_editor() as editor: operation.database_backwards("test_adin", editor, new_state, project_state) self.assertIndexNotExists("test_adin_pony", ["pink"]) # And deconstruction definition = operation.deconstruct() self.assertEqual(definition[0], "AddIndex") self.assertEqual(definition[1], []) self.assertEqual(definition[2], {'model_name': "Pony", 'index': index}) def test_remove_index(self): """ Test the RemoveIndex operation. """ project_state = self.set_up_test_model("test_rmin", multicol_index=True) self.assertTableExists("test_rmin_pony") self.assertIndexExists("test_rmin_pony", ["pink", "weight"]) operation = migrations.RemoveIndex("Pony", "pony_test_idx") self.assertEqual(operation.describe(), "Remove index pony_test_idx from Pony") new_state = project_state.clone() operation.state_forwards("test_rmin", new_state) # Test the state alteration self.assertEqual(len(new_state.models["test_rmin", "pony"].options['indexes']), 0) self.assertIndexExists("test_rmin_pony", ["pink", "weight"]) # Test the database alteration with connection.schema_editor() as editor: operation.database_forwards("test_rmin", editor, project_state, new_state) self.assertIndexNotExists("test_rmin_pony", ["pink", "weight"]) # And test reversal with connection.schema_editor() as editor: operation.database_backwards("test_rmin", editor, new_state, project_state) self.assertIndexExists("test_rmin_pony", ["pink", "weight"]) # And deconstruction definition = operation.deconstruct() self.assertEqual(definition[0], "RemoveIndex") self.assertEqual(definition[1], []) self.assertEqual(definition[2], {'model_name': "Pony", 'name': "pony_test_idx"}) # Also test a field dropped with index - sqlite remake issue operations = [ migrations.RemoveIndex("Pony", "pony_test_idx"), migrations.RemoveField("Pony", "pink"), ] self.assertColumnExists("test_rmin_pony", "pink") self.assertIndexExists("test_rmin_pony", ["pink", "weight"]) # Test database alteration new_state = project_state.clone() self.apply_operations('test_rmin', new_state, operations=operations) self.assertColumnNotExists("test_rmin_pony", "pink") self.assertIndexNotExists("test_rmin_pony", ["pink", "weight"]) # And test reversal self.unapply_operations("test_rmin", project_state, operations=operations) self.assertIndexExists("test_rmin_pony", ["pink", "weight"]) def test_add_index_state_forwards(self): project_state = self.set_up_test_model('test_adinsf') index = models.Index(fields=['pink'], name='test_adinsf_pony_pink_idx') old_model = project_state.apps.get_model('test_adinsf', 'Pony') new_state = project_state.clone() operation = migrations.AddIndex('Pony', index) operation.state_forwards('test_adinsf', new_state) new_model = new_state.apps.get_model('test_adinsf', 'Pony') 
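        # A note on what the identity check below exercises: state_forwards()
        # marks "Pony" as changed, so the project state re-renders a fresh
        # model class instead of reusing the cached one. A minimal sketch of
        # the pattern (names are the ones set up above):
        #
        #     operation = migrations.AddIndex('Pony', index)
        #     operation.state_forwards('test_adinsf', new_state)
        #     assert old_model is not new_state.apps.get_model('test_adinsf', 'Pony')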
self.assertIsNot(old_model, new_model) def test_remove_index_state_forwards(self): project_state = self.set_up_test_model('test_rminsf') index = models.Index(fields=['pink'], name='test_rminsf_pony_pink_idx') migrations.AddIndex('Pony', index).state_forwards('test_rminsf', project_state) old_model = project_state.apps.get_model('test_rminsf', 'Pony') new_state = project_state.clone() operation = migrations.RemoveIndex('Pony', 'test_rminsf_pony_pink_idx') operation.state_forwards('test_rminsf', new_state) new_model = new_state.apps.get_model('test_rminsf', 'Pony') self.assertIsNot(old_model, new_model) def test_alter_field_with_index(self): """ Test AlterField operation with an index to ensure indexes created via Meta.indexes don't get dropped with sqlite3 remake. """ project_state = self.set_up_test_model("test_alflin", index=True) operation = migrations.AlterField("Pony", "pink", models.IntegerField(null=True)) new_state = project_state.clone() operation.state_forwards("test_alflin", new_state) # Test the database alteration self.assertColumnNotNull("test_alflin_pony", "pink") with connection.schema_editor() as editor: operation.database_forwards("test_alflin", editor, project_state, new_state) # Index hasn't been dropped self.assertIndexExists("test_alflin_pony", ["pink"]) # And test reversal with connection.schema_editor() as editor: operation.database_backwards("test_alflin", editor, new_state, project_state) # Ensure the index is still there self.assertIndexExists("test_alflin_pony", ["pink"]) def test_alter_index_together(self): """ Tests the AlterIndexTogether operation. """ project_state = self.set_up_test_model("test_alinto") # Test the state alteration operation = migrations.AlterIndexTogether("Pony", [("pink", "weight")]) self.assertEqual(operation.describe(), "Alter index_together for Pony (1 constraint(s))") new_state = project_state.clone() operation.state_forwards("test_alinto", new_state) self.assertEqual(len(project_state.models["test_alinto", "pony"].options.get("index_together", set())), 0) self.assertEqual(len(new_state.models["test_alinto", "pony"].options.get("index_together", set())), 1) # Make sure there's no matching index self.assertIndexNotExists("test_alinto_pony", ["pink", "weight"]) # Test the database alteration with connection.schema_editor() as editor: operation.database_forwards("test_alinto", editor, project_state, new_state) self.assertIndexExists("test_alinto_pony", ["pink", "weight"]) # And test reversal with connection.schema_editor() as editor: operation.database_backwards("test_alinto", editor, new_state, project_state) self.assertIndexNotExists("test_alinto_pony", ["pink", "weight"]) # And deconstruction definition = operation.deconstruct() self.assertEqual(definition[0], "AlterIndexTogether") self.assertEqual(definition[1], []) self.assertEqual(definition[2], {'name': "Pony", 'index_together': {("pink", "weight")}}) def test_alter_index_together_remove(self): operation = migrations.AlterIndexTogether("Pony", None) self.assertEqual(operation.describe(), "Alter index_together for Pony (0 constraint(s))") @skipUnlessDBFeature('supports_table_check_constraints') def test_add_constraint(self): project_state = self.set_up_test_model("test_addconstraint") gt_check = models.Q(pink__gt=2) gt_constraint = models.CheckConstraint(check=gt_check, name="test_add_constraint_pony_pink_gt_2") gt_operation = migrations.AddConstraint("Pony", gt_constraint) self.assertEqual( gt_operation.describe(), "Create constraint test_add_constraint_pony_pink_gt_2 on model Pony" 
        )
        # Test the state alteration
        new_state = project_state.clone()
        gt_operation.state_forwards("test_addconstraint", new_state)
        self.assertEqual(len(new_state.models["test_addconstraint", "pony"].options["constraints"]), 1)
        Pony = new_state.apps.get_model("test_addconstraint", "Pony")
        self.assertEqual(len(Pony._meta.constraints), 1)
        # Test the database alteration
        with connection.schema_editor() as editor:
            gt_operation.database_forwards("test_addconstraint", editor, project_state, new_state)
        with self.assertRaises(IntegrityError), transaction.atomic():
            Pony.objects.create(pink=1, weight=1.0)
        # Add another one.
        lt_check = models.Q(pink__lt=100)
        lt_constraint = models.CheckConstraint(check=lt_check, name="test_add_constraint_pony_pink_lt_100")
        lt_operation = migrations.AddConstraint("Pony", lt_constraint)
        lt_operation.state_forwards("test_addconstraint", new_state)
        self.assertEqual(len(new_state.models["test_addconstraint", "pony"].options["constraints"]), 2)
        Pony = new_state.apps.get_model("test_addconstraint", "Pony")
        self.assertEqual(len(Pony._meta.constraints), 2)
        with connection.schema_editor() as editor:
            lt_operation.database_forwards("test_addconstraint", editor, project_state, new_state)
        with self.assertRaises(IntegrityError), transaction.atomic():
            Pony.objects.create(pink=100, weight=1.0)
        # Test reversal
        with connection.schema_editor() as editor:
            gt_operation.database_backwards("test_addconstraint", editor, new_state, project_state)
        Pony.objects.create(pink=1, weight=1.0)
        # Test deconstruction
        definition = gt_operation.deconstruct()
        self.assertEqual(definition[0], "AddConstraint")
        self.assertEqual(definition[1], [])
        self.assertEqual(definition[2], {'model_name': "Pony", 'constraint': gt_constraint})

    @skipUnlessDBFeature('supports_table_check_constraints')
    def test_add_constraint_percent_escaping(self):
        app_label = 'add_constraint_string_quoting'
        operations = [
            CreateModel(
                'Author',
                fields=[
                    ('id', models.AutoField(primary_key=True)),
                    ('name', models.CharField(max_length=100)),
                    ('rebate', models.CharField(max_length=100)),
                ],
            ),
        ]
        from_state = self.apply_operations(app_label, ProjectState(), operations)
        # The "%" generated by the startswith lookup must be escaped for the
        # driver while still acting as a trailing wildcard in the LIKE pattern.
        check = models.Q(name__startswith='Albert')
        constraint = models.CheckConstraint(check=check, name='name_constraint')
        operation = migrations.AddConstraint('Author', constraint)
        to_state = from_state.clone()
        operation.state_forwards(app_label, to_state)
        with connection.schema_editor() as editor:
            operation.database_forwards(app_label, editor, from_state, to_state)
        Author = to_state.apps.get_model(app_label, 'Author')
        with self.assertRaises(IntegrityError), transaction.atomic():
            Author.objects.create(name='Artur')
        # A literal "%" should be escaped in a way that is not considered a
        # wildcard.
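        # For reference (a hedged sketch; backend quoting and escape syntax
        # vary): the check below should compile to something roughly like
        #
        #     CHECK ("rebate" LIKE '%\%')
        #
        # i.e. a leading wildcard followed by an escaped literal "%".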
        check = models.Q(rebate__endswith='%')
        constraint = models.CheckConstraint(check=check, name='rebate_constraint')
        operation = migrations.AddConstraint('Author', constraint)
        from_state = to_state
        to_state = from_state.clone()
        operation.state_forwards(app_label, to_state)
        with connection.schema_editor() as editor:
            operation.database_forwards(app_label, editor, from_state, to_state)
        Author = to_state.apps.get_model(app_label, 'Author')
        with self.assertRaises(IntegrityError), transaction.atomic():
            Author.objects.create(name='Albert', rebate='10$')
        author = Author.objects.create(name='Albert', rebate='10%')
        self.assertEqual(Author.objects.get(), author)

    @skipUnlessDBFeature('supports_table_check_constraints')
    def test_add_or_constraint(self):
        app_label = 'test_addorconstraint'
        constraint_name = 'add_constraint_or'
        from_state = self.set_up_test_model(app_label)
        check = models.Q(pink__gt=2, weight__gt=2) | models.Q(weight__lt=0)
        constraint = models.CheckConstraint(check=check, name=constraint_name)
        operation = migrations.AddConstraint('Pony', constraint)
        to_state = from_state.clone()
        operation.state_forwards(app_label, to_state)
        with connection.schema_editor() as editor:
            operation.database_forwards(app_label, editor, from_state, to_state)
        Pony = to_state.apps.get_model(app_label, 'Pony')
        with self.assertRaises(IntegrityError), transaction.atomic():
            Pony.objects.create(pink=2, weight=3.0)
        with self.assertRaises(IntegrityError), transaction.atomic():
            Pony.objects.create(pink=3, weight=1.0)
        Pony.objects.bulk_create([
            Pony(pink=3, weight=-1.0),
            Pony(pink=1, weight=-1.0),
            Pony(pink=3, weight=3.0),
        ])

    @skipUnlessDBFeature('supports_table_check_constraints')
    def test_remove_constraint(self):
        project_state = self.set_up_test_model("test_removeconstraint", constraints=[
            models.CheckConstraint(check=models.Q(pink__gt=2), name="test_remove_constraint_pony_pink_gt_2"),
            models.CheckConstraint(check=models.Q(pink__lt=100), name="test_remove_constraint_pony_pink_lt_100"),
        ])
        gt_operation = migrations.RemoveConstraint("Pony", "test_remove_constraint_pony_pink_gt_2")
        self.assertEqual(
            gt_operation.describe(),
            "Remove constraint test_remove_constraint_pony_pink_gt_2 from model Pony"
        )
        # Test state alteration
        new_state = project_state.clone()
        gt_operation.state_forwards("test_removeconstraint", new_state)
        self.assertEqual(len(new_state.models["test_removeconstraint", "pony"].options['constraints']), 1)
        Pony = new_state.apps.get_model("test_removeconstraint", "Pony")
        self.assertEqual(len(Pony._meta.constraints), 1)
        # Test database alteration
        with connection.schema_editor() as editor:
            gt_operation.database_forwards("test_removeconstraint", editor, project_state, new_state)
        Pony.objects.create(pink=1, weight=1.0).delete()
        with self.assertRaises(IntegrityError), transaction.atomic():
            Pony.objects.create(pink=100, weight=1.0)
        # Remove the other one.
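        # RemoveConstraint identifies the constraint purely by name; the
        # definition being dropped next is the one declared above in
        # set_up_test_model():
        #
        #     models.CheckConstraint(check=models.Q(pink__lt=100),
        #                            name="test_remove_constraint_pony_pink_lt_100")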
lt_operation = migrations.RemoveConstraint("Pony", "test_remove_constraint_pony_pink_lt_100") lt_operation.state_forwards("test_removeconstraint", new_state) self.assertEqual(len(new_state.models["test_removeconstraint", "pony"].options['constraints']), 0) Pony = new_state.apps.get_model("test_removeconstraint", "Pony") self.assertEqual(len(Pony._meta.constraints), 0) with connection.schema_editor() as editor: lt_operation.database_forwards("test_removeconstraint", editor, project_state, new_state) Pony.objects.create(pink=100, weight=1.0).delete() # Test reversal with connection.schema_editor() as editor: gt_operation.database_backwards("test_removeconstraint", editor, new_state, project_state) with self.assertRaises(IntegrityError), transaction.atomic(): Pony.objects.create(pink=1, weight=1.0) # Test deconstruction definition = gt_operation.deconstruct() self.assertEqual(definition[0], "RemoveConstraint") self.assertEqual(definition[1], []) self.assertEqual(definition[2], {'model_name': "Pony", 'name': "test_remove_constraint_pony_pink_gt_2"}) def test_add_partial_unique_constraint(self): project_state = self.set_up_test_model('test_addpartialuniqueconstraint') partial_unique_constraint = models.UniqueConstraint( fields=['pink'], condition=models.Q(weight__gt=5), name='test_constraint_pony_pink_for_weight_gt_5_uniq', ) operation = migrations.AddConstraint('Pony', partial_unique_constraint) self.assertEqual( operation.describe(), 'Create constraint test_constraint_pony_pink_for_weight_gt_5_uniq ' 'on model Pony' ) # Test the state alteration new_state = project_state.clone() operation.state_forwards('test_addpartialuniqueconstraint', new_state) self.assertEqual(len(new_state.models['test_addpartialuniqueconstraint', 'pony'].options['constraints']), 1) Pony = new_state.apps.get_model('test_addpartialuniqueconstraint', 'Pony') self.assertEqual(len(Pony._meta.constraints), 1) # Test the database alteration with connection.schema_editor() as editor: operation.database_forwards('test_addpartialuniqueconstraint', editor, project_state, new_state) # Test constraint works Pony.objects.create(pink=1, weight=4.0) Pony.objects.create(pink=1, weight=4.0) Pony.objects.create(pink=1, weight=6.0) if connection.features.supports_partial_indexes: with self.assertRaises(IntegrityError), transaction.atomic(): Pony.objects.create(pink=1, weight=7.0) else: Pony.objects.create(pink=1, weight=7.0) # Test reversal with connection.schema_editor() as editor: operation.database_backwards('test_addpartialuniqueconstraint', editor, new_state, project_state) # Test constraint doesn't work Pony.objects.create(pink=1, weight=7.0) # Test deconstruction definition = operation.deconstruct() self.assertEqual(definition[0], 'AddConstraint') self.assertEqual(definition[1], []) self.assertEqual(definition[2], {'model_name': 'Pony', 'constraint': partial_unique_constraint}) def test_remove_partial_unique_constraint(self): project_state = self.set_up_test_model('test_removepartialuniqueconstraint', constraints=[ models.UniqueConstraint( fields=['pink'], condition=models.Q(weight__gt=5), name='test_constraint_pony_pink_for_weight_gt_5_uniq', ), ]) gt_operation = migrations.RemoveConstraint('Pony', 'test_constraint_pony_pink_for_weight_gt_5_uniq') self.assertEqual( gt_operation.describe(), 'Remove constraint test_constraint_pony_pink_for_weight_gt_5_uniq from model Pony' ) # Test state alteration new_state = project_state.clone() gt_operation.state_forwards('test_removepartialuniqueconstraint', new_state) 
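        # The constraint being dropped (declared above) enforces uniqueness of
        # `pink` only for rows with weight > 5; once it's removed, even the
        # weight=6.0/7.0 duplicates below are expected to succeed:
        #
        #     models.UniqueConstraint(
        #         fields=['pink'],
        #         condition=models.Q(weight__gt=5),
        #         name='test_constraint_pony_pink_for_weight_gt_5_uniq',
        #     )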
self.assertEqual(len(new_state.models['test_removepartialuniqueconstraint', 'pony'].options['constraints']), 0) Pony = new_state.apps.get_model('test_removepartialuniqueconstraint', 'Pony') self.assertEqual(len(Pony._meta.constraints), 0) # Test database alteration with connection.schema_editor() as editor: gt_operation.database_forwards('test_removepartialuniqueconstraint', editor, project_state, new_state) # Test constraint doesn't work Pony.objects.create(pink=1, weight=4.0) Pony.objects.create(pink=1, weight=4.0) Pony.objects.create(pink=1, weight=6.0) Pony.objects.create(pink=1, weight=7.0).delete() # Test reversal with connection.schema_editor() as editor: gt_operation.database_backwards('test_removepartialuniqueconstraint', editor, new_state, project_state) # Test constraint works if connection.features.supports_partial_indexes: with self.assertRaises(IntegrityError), transaction.atomic(): Pony.objects.create(pink=1, weight=7.0) else: Pony.objects.create(pink=1, weight=7.0) # Test deconstruction definition = gt_operation.deconstruct() self.assertEqual(definition[0], 'RemoveConstraint') self.assertEqual(definition[1], []) self.assertEqual(definition[2], { 'model_name': 'Pony', 'name': 'test_constraint_pony_pink_for_weight_gt_5_uniq', }) def test_alter_model_options(self): """ Tests the AlterModelOptions operation. """ project_state = self.set_up_test_model("test_almoop") # Test the state alteration (no DB alteration to test) operation = migrations.AlterModelOptions("Pony", {"permissions": [("can_groom", "Can groom")]}) self.assertEqual(operation.describe(), "Change Meta options on Pony") new_state = project_state.clone() operation.state_forwards("test_almoop", new_state) self.assertEqual(len(project_state.models["test_almoop", "pony"].options.get("permissions", [])), 0) self.assertEqual(len(new_state.models["test_almoop", "pony"].options.get("permissions", [])), 1) self.assertEqual(new_state.models["test_almoop", "pony"].options["permissions"][0][0], "can_groom") # And deconstruction definition = operation.deconstruct() self.assertEqual(definition[0], "AlterModelOptions") self.assertEqual(definition[1], []) self.assertEqual(definition[2], {'name': "Pony", 'options': {"permissions": [("can_groom", "Can groom")]}}) def test_alter_model_options_emptying(self): """ The AlterModelOptions operation removes keys from the dict (#23121) """ project_state = self.set_up_test_model("test_almoop", options=True) # Test the state alteration (no DB alteration to test) operation = migrations.AlterModelOptions("Pony", {}) self.assertEqual(operation.describe(), "Change Meta options on Pony") new_state = project_state.clone() operation.state_forwards("test_almoop", new_state) self.assertEqual(len(project_state.models["test_almoop", "pony"].options.get("permissions", [])), 1) self.assertEqual(len(new_state.models["test_almoop", "pony"].options.get("permissions", [])), 0) # And deconstruction definition = operation.deconstruct() self.assertEqual(definition[0], "AlterModelOptions") self.assertEqual(definition[1], []) self.assertEqual(definition[2], {'name': "Pony", 'options': {}}) def test_alter_order_with_respect_to(self): """ Tests the AlterOrderWithRespectTo operation. 
""" project_state = self.set_up_test_model("test_alorwrtto", related_model=True) # Test the state alteration operation = migrations.AlterOrderWithRespectTo("Rider", "pony") self.assertEqual(operation.describe(), "Set order_with_respect_to on Rider to pony") new_state = project_state.clone() operation.state_forwards("test_alorwrtto", new_state) self.assertIsNone( project_state.models["test_alorwrtto", "rider"].options.get("order_with_respect_to", None) ) self.assertEqual( new_state.models["test_alorwrtto", "rider"].options.get("order_with_respect_to", None), "pony" ) # Make sure there's no matching index self.assertColumnNotExists("test_alorwrtto_rider", "_order") # Create some rows before alteration rendered_state = project_state.apps pony = rendered_state.get_model("test_alorwrtto", "Pony").objects.create(weight=50) rendered_state.get_model("test_alorwrtto", "Rider").objects.create(pony=pony, friend_id=1) rendered_state.get_model("test_alorwrtto", "Rider").objects.create(pony=pony, friend_id=2) # Test the database alteration with connection.schema_editor() as editor: operation.database_forwards("test_alorwrtto", editor, project_state, new_state) self.assertColumnExists("test_alorwrtto_rider", "_order") # Check for correct value in rows updated_riders = new_state.apps.get_model("test_alorwrtto", "Rider").objects.all() self.assertEqual(updated_riders[0]._order, 0) self.assertEqual(updated_riders[1]._order, 0) # And test reversal with connection.schema_editor() as editor: operation.database_backwards("test_alorwrtto", editor, new_state, project_state) self.assertColumnNotExists("test_alorwrtto_rider", "_order") # And deconstruction definition = operation.deconstruct() self.assertEqual(definition[0], "AlterOrderWithRespectTo") self.assertEqual(definition[1], []) self.assertEqual(definition[2], {'name': "Rider", 'order_with_respect_to': "pony"}) def test_alter_model_managers(self): """ The managers on a model are set. """ project_state = self.set_up_test_model("test_almoma") # Test the state alteration operation = migrations.AlterModelManagers( "Pony", managers=[ ("food_qs", FoodQuerySet.as_manager()), ("food_mgr", FoodManager("a", "b")), ("food_mgr_kwargs", FoodManager("x", "y", 3, 4)), ] ) self.assertEqual(operation.describe(), "Change managers on Pony") managers = project_state.models["test_almoma", "pony"].managers self.assertEqual(managers, []) new_state = project_state.clone() operation.state_forwards("test_almoma", new_state) self.assertIn(("test_almoma", "pony"), new_state.models) managers = new_state.models["test_almoma", "pony"].managers self.assertEqual(managers[0][0], "food_qs") self.assertIsInstance(managers[0][1], models.Manager) self.assertEqual(managers[1][0], "food_mgr") self.assertIsInstance(managers[1][1], FoodManager) self.assertEqual(managers[1][1].args, ("a", "b", 1, 2)) self.assertEqual(managers[2][0], "food_mgr_kwargs") self.assertIsInstance(managers[2][1], FoodManager) self.assertEqual(managers[2][1].args, ("x", "y", 3, 4)) rendered_state = new_state.apps model = rendered_state.get_model('test_almoma', 'pony') self.assertIsInstance(model.food_qs, models.Manager) self.assertIsInstance(model.food_mgr, FoodManager) self.assertIsInstance(model.food_mgr_kwargs, FoodManager) def test_alter_model_managers_emptying(self): """ The managers on a model are set. 
""" project_state = self.set_up_test_model("test_almomae", manager_model=True) # Test the state alteration operation = migrations.AlterModelManagers("Food", managers=[]) self.assertEqual(operation.describe(), "Change managers on Food") self.assertIn(("test_almomae", "food"), project_state.models) managers = project_state.models["test_almomae", "food"].managers self.assertEqual(managers[0][0], "food_qs") self.assertIsInstance(managers[0][1], models.Manager) self.assertEqual(managers[1][0], "food_mgr") self.assertIsInstance(managers[1][1], FoodManager) self.assertEqual(managers[1][1].args, ("a", "b", 1, 2)) self.assertEqual(managers[2][0], "food_mgr_kwargs") self.assertIsInstance(managers[2][1], FoodManager) self.assertEqual(managers[2][1].args, ("x", "y", 3, 4)) new_state = project_state.clone() operation.state_forwards("test_almomae", new_state) managers = new_state.models["test_almomae", "food"].managers self.assertEqual(managers, []) def test_alter_fk(self): """ Creating and then altering an FK works correctly and deals with the pending SQL (#23091) """ project_state = self.set_up_test_model("test_alfk") # Test adding and then altering the FK in one go create_operation = migrations.CreateModel( name="Rider", fields=[ ("id", models.AutoField(primary_key=True)), ("pony", models.ForeignKey("Pony", models.CASCADE)), ], ) create_state = project_state.clone() create_operation.state_forwards("test_alfk", create_state) alter_operation = migrations.AlterField( model_name='Rider', name='pony', field=models.ForeignKey("Pony", models.CASCADE, editable=False), ) alter_state = create_state.clone() alter_operation.state_forwards("test_alfk", alter_state) with connection.schema_editor() as editor: create_operation.database_forwards("test_alfk", editor, project_state, create_state) alter_operation.database_forwards("test_alfk", editor, create_state, alter_state) def test_alter_fk_non_fk(self): """ Altering an FK to a non-FK works (#23244) """ # Test the state alteration operation = migrations.AlterField( model_name="Rider", name="pony", field=models.FloatField(), ) project_state, new_state = self.make_test_state("test_afknfk", operation, related_model=True) # Test the database alteration self.assertColumnExists("test_afknfk_rider", "pony_id") self.assertColumnNotExists("test_afknfk_rider", "pony") with connection.schema_editor() as editor: operation.database_forwards("test_afknfk", editor, project_state, new_state) self.assertColumnExists("test_afknfk_rider", "pony") self.assertColumnNotExists("test_afknfk_rider", "pony_id") # And test reversal with connection.schema_editor() as editor: operation.database_backwards("test_afknfk", editor, new_state, project_state) self.assertColumnExists("test_afknfk_rider", "pony_id") self.assertColumnNotExists("test_afknfk_rider", "pony") def test_run_sql(self): """ Tests the RunSQL operation. 
""" project_state = self.set_up_test_model("test_runsql") # Create the operation operation = migrations.RunSQL( # Use a multi-line string with a comment to test splitting on SQLite and MySQL respectively "CREATE TABLE i_love_ponies (id int, special_thing varchar(15));\n" "INSERT INTO i_love_ponies (id, special_thing) VALUES (1, 'i love ponies'); -- this is magic!\n" "INSERT INTO i_love_ponies (id, special_thing) VALUES (2, 'i love django');\n" "UPDATE i_love_ponies SET special_thing = 'Ponies' WHERE special_thing LIKE '%%ponies';" "UPDATE i_love_ponies SET special_thing = 'Django' WHERE special_thing LIKE '%django';", # Run delete queries to test for parameter substitution failure # reported in #23426 "DELETE FROM i_love_ponies WHERE special_thing LIKE '%Django%';" "DELETE FROM i_love_ponies WHERE special_thing LIKE '%%Ponies%%';" "DROP TABLE i_love_ponies", state_operations=[migrations.CreateModel("SomethingElse", [("id", models.AutoField(primary_key=True))])], ) self.assertEqual(operation.describe(), "Raw SQL operation") # Test the state alteration new_state = project_state.clone() operation.state_forwards("test_runsql", new_state) self.assertEqual(len(new_state.models["test_runsql", "somethingelse"].fields), 1) # Make sure there's no table self.assertTableNotExists("i_love_ponies") # Test SQL collection with connection.schema_editor(collect_sql=True) as editor: operation.database_forwards("test_runsql", editor, project_state, new_state) self.assertIn("LIKE '%%ponies';", "\n".join(editor.collected_sql)) operation.database_backwards("test_runsql", editor, project_state, new_state) self.assertIn("LIKE '%%Ponies%%';", "\n".join(editor.collected_sql)) # Test the database alteration with connection.schema_editor() as editor: operation.database_forwards("test_runsql", editor, project_state, new_state) self.assertTableExists("i_love_ponies") # Make sure all the SQL was processed with connection.cursor() as cursor: cursor.execute("SELECT COUNT(*) FROM i_love_ponies") self.assertEqual(cursor.fetchall()[0][0], 2) cursor.execute("SELECT COUNT(*) FROM i_love_ponies WHERE special_thing = 'Django'") self.assertEqual(cursor.fetchall()[0][0], 1) cursor.execute("SELECT COUNT(*) FROM i_love_ponies WHERE special_thing = 'Ponies'") self.assertEqual(cursor.fetchall()[0][0], 1) # And test reversal self.assertTrue(operation.reversible) with connection.schema_editor() as editor: operation.database_backwards("test_runsql", editor, new_state, project_state) self.assertTableNotExists("i_love_ponies") # And deconstruction definition = operation.deconstruct() self.assertEqual(definition[0], "RunSQL") self.assertEqual(definition[1], []) self.assertEqual(sorted(definition[2]), ["reverse_sql", "sql", "state_operations"]) # And elidable reduction self.assertIs(False, operation.reduce(operation, [])) elidable_operation = migrations.RunSQL('SELECT 1 FROM void;', elidable=True) self.assertEqual(elidable_operation.reduce(operation, []), [operation]) def test_run_sql_params(self): """ #23426 - RunSQL should accept parameters. 
""" project_state = self.set_up_test_model("test_runsql") # Create the operation operation = migrations.RunSQL( ["CREATE TABLE i_love_ponies (id int, special_thing varchar(15));"], ["DROP TABLE i_love_ponies"], ) param_operation = migrations.RunSQL( # forwards ( "INSERT INTO i_love_ponies (id, special_thing) VALUES (1, 'Django');", ["INSERT INTO i_love_ponies (id, special_thing) VALUES (2, %s);", ['Ponies']], ("INSERT INTO i_love_ponies (id, special_thing) VALUES (%s, %s);", (3, 'Python',)), ), # backwards [ "DELETE FROM i_love_ponies WHERE special_thing = 'Django';", ["DELETE FROM i_love_ponies WHERE special_thing = 'Ponies';", None], ("DELETE FROM i_love_ponies WHERE id = %s OR special_thing = %s;", [3, 'Python']), ] ) # Make sure there's no table self.assertTableNotExists("i_love_ponies") new_state = project_state.clone() # Test the database alteration with connection.schema_editor() as editor: operation.database_forwards("test_runsql", editor, project_state, new_state) # Test parameter passing with connection.schema_editor() as editor: param_operation.database_forwards("test_runsql", editor, project_state, new_state) # Make sure all the SQL was processed with connection.cursor() as cursor: cursor.execute("SELECT COUNT(*) FROM i_love_ponies") self.assertEqual(cursor.fetchall()[0][0], 3) with connection.schema_editor() as editor: param_operation.database_backwards("test_runsql", editor, new_state, project_state) with connection.cursor() as cursor: cursor.execute("SELECT COUNT(*) FROM i_love_ponies") self.assertEqual(cursor.fetchall()[0][0], 0) # And test reversal with connection.schema_editor() as editor: operation.database_backwards("test_runsql", editor, new_state, project_state) self.assertTableNotExists("i_love_ponies") def test_run_sql_params_invalid(self): """ #23426 - RunSQL should fail when a list of statements with an incorrect number of tuples is given. """ project_state = self.set_up_test_model("test_runsql") new_state = project_state.clone() operation = migrations.RunSQL( # forwards [ ["INSERT INTO foo (bar) VALUES ('buz');"] ], # backwards ( ("DELETE FROM foo WHERE bar = 'buz';", 'invalid', 'parameter count'), ), ) with connection.schema_editor() as editor: with self.assertRaisesMessage(ValueError, "Expected a 2-tuple but got 1"): operation.database_forwards("test_runsql", editor, project_state, new_state) with connection.schema_editor() as editor: with self.assertRaisesMessage(ValueError, "Expected a 2-tuple but got 3"): operation.database_backwards("test_runsql", editor, new_state, project_state) def test_run_sql_noop(self): """ #24098 - Tests no-op RunSQL operations. 
""" operation = migrations.RunSQL(migrations.RunSQL.noop, migrations.RunSQL.noop) with connection.schema_editor() as editor: operation.database_forwards("test_runsql", editor, None, None) operation.database_backwards("test_runsql", editor, None, None) def test_run_python(self): """ Tests the RunPython operation """ project_state = self.set_up_test_model("test_runpython", mti_model=True) # Create the operation def inner_method(models, schema_editor): Pony = models.get_model("test_runpython", "Pony") Pony.objects.create(pink=1, weight=3.55) Pony.objects.create(weight=5) def inner_method_reverse(models, schema_editor): Pony = models.get_model("test_runpython", "Pony") Pony.objects.filter(pink=1, weight=3.55).delete() Pony.objects.filter(weight=5).delete() operation = migrations.RunPython(inner_method, reverse_code=inner_method_reverse) self.assertEqual(operation.describe(), "Raw Python operation") # Test the state alteration does nothing new_state = project_state.clone() operation.state_forwards("test_runpython", new_state) self.assertEqual(new_state, project_state) # Test the database alteration self.assertEqual(project_state.apps.get_model("test_runpython", "Pony").objects.count(), 0) with connection.schema_editor() as editor: operation.database_forwards("test_runpython", editor, project_state, new_state) self.assertEqual(project_state.apps.get_model("test_runpython", "Pony").objects.count(), 2) # Now test reversal self.assertTrue(operation.reversible) with connection.schema_editor() as editor: operation.database_backwards("test_runpython", editor, project_state, new_state) self.assertEqual(project_state.apps.get_model("test_runpython", "Pony").objects.count(), 0) # Now test we can't use a string with self.assertRaisesMessage(ValueError, 'RunPython must be supplied with a callable'): migrations.RunPython("print 'ahahaha'") # And deconstruction definition = operation.deconstruct() self.assertEqual(definition[0], "RunPython") self.assertEqual(definition[1], []) self.assertEqual(sorted(definition[2]), ["code", "reverse_code"]) # Also test reversal fails, with an operation identical to above but without reverse_code set no_reverse_operation = migrations.RunPython(inner_method) self.assertFalse(no_reverse_operation.reversible) with connection.schema_editor() as editor: no_reverse_operation.database_forwards("test_runpython", editor, project_state, new_state) with self.assertRaises(NotImplementedError): no_reverse_operation.database_backwards("test_runpython", editor, new_state, project_state) self.assertEqual(project_state.apps.get_model("test_runpython", "Pony").objects.count(), 2) def create_ponies(models, schema_editor): Pony = models.get_model("test_runpython", "Pony") pony1 = Pony.objects.create(pink=1, weight=3.55) self.assertIsNot(pony1.pk, None) pony2 = Pony.objects.create(weight=5) self.assertIsNot(pony2.pk, None) self.assertNotEqual(pony1.pk, pony2.pk) operation = migrations.RunPython(create_ponies) with connection.schema_editor() as editor: operation.database_forwards("test_runpython", editor, project_state, new_state) self.assertEqual(project_state.apps.get_model("test_runpython", "Pony").objects.count(), 4) # And deconstruction definition = operation.deconstruct() self.assertEqual(definition[0], "RunPython") self.assertEqual(definition[1], []) self.assertEqual(sorted(definition[2]), ["code"]) def create_shetlandponies(models, schema_editor): ShetlandPony = models.get_model("test_runpython", "ShetlandPony") pony1 = ShetlandPony.objects.create(weight=4.0) self.assertIsNot(pony1.pk, 
None) pony2 = ShetlandPony.objects.create(weight=5.0) self.assertIsNot(pony2.pk, None) self.assertNotEqual(pony1.pk, pony2.pk) operation = migrations.RunPython(create_shetlandponies) with connection.schema_editor() as editor: operation.database_forwards("test_runpython", editor, project_state, new_state) self.assertEqual(project_state.apps.get_model("test_runpython", "Pony").objects.count(), 6) self.assertEqual(project_state.apps.get_model("test_runpython", "ShetlandPony").objects.count(), 2) # And elidable reduction self.assertIs(False, operation.reduce(operation, [])) elidable_operation = migrations.RunPython(inner_method, elidable=True) self.assertEqual(elidable_operation.reduce(operation, []), [operation]) def test_run_python_atomic(self): """ Tests the RunPython operation correctly handles the "atomic" keyword """ project_state = self.set_up_test_model("test_runpythonatomic", mti_model=True) def inner_method(models, schema_editor): Pony = models.get_model("test_runpythonatomic", "Pony") Pony.objects.create(pink=1, weight=3.55) raise ValueError("Adrian hates ponies.") # Verify atomicity when applying. atomic_migration = Migration("test", "test_runpythonatomic") atomic_migration.operations = [migrations.RunPython(inner_method, reverse_code=inner_method)] non_atomic_migration = Migration("test", "test_runpythonatomic") non_atomic_migration.operations = [migrations.RunPython(inner_method, reverse_code=inner_method, atomic=False)] # If we're a fully-transactional database, both versions should rollback if connection.features.can_rollback_ddl: self.assertEqual(project_state.apps.get_model("test_runpythonatomic", "Pony").objects.count(), 0) with self.assertRaises(ValueError): with connection.schema_editor() as editor: atomic_migration.apply(project_state, editor) self.assertEqual(project_state.apps.get_model("test_runpythonatomic", "Pony").objects.count(), 0) with self.assertRaises(ValueError): with connection.schema_editor() as editor: non_atomic_migration.apply(project_state, editor) self.assertEqual(project_state.apps.get_model("test_runpythonatomic", "Pony").objects.count(), 0) # Otherwise, the non-atomic operation should leave a row there else: self.assertEqual(project_state.apps.get_model("test_runpythonatomic", "Pony").objects.count(), 0) with self.assertRaises(ValueError): with connection.schema_editor() as editor: atomic_migration.apply(project_state, editor) self.assertEqual(project_state.apps.get_model("test_runpythonatomic", "Pony").objects.count(), 0) with self.assertRaises(ValueError): with connection.schema_editor() as editor: non_atomic_migration.apply(project_state, editor) self.assertEqual(project_state.apps.get_model("test_runpythonatomic", "Pony").objects.count(), 1) # Reset object count to zero and verify atomicity when unapplying. project_state.apps.get_model("test_runpythonatomic", "Pony").objects.all().delete() # On a fully-transactional database, both versions rollback. 
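        # unapply() runs reverse_code, which was deliberately set to the same
        # failing callable, so the checks below mirror the apply() checks above.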
if connection.features.can_rollback_ddl: self.assertEqual(project_state.apps.get_model("test_runpythonatomic", "Pony").objects.count(), 0) with self.assertRaises(ValueError): with connection.schema_editor() as editor: atomic_migration.unapply(project_state, editor) self.assertEqual(project_state.apps.get_model("test_runpythonatomic", "Pony").objects.count(), 0) with self.assertRaises(ValueError): with connection.schema_editor() as editor: non_atomic_migration.unapply(project_state, editor) self.assertEqual(project_state.apps.get_model("test_runpythonatomic", "Pony").objects.count(), 0) # Otherwise, the non-atomic operation leaves a row there. else: self.assertEqual(project_state.apps.get_model("test_runpythonatomic", "Pony").objects.count(), 0) with self.assertRaises(ValueError): with connection.schema_editor() as editor: atomic_migration.unapply(project_state, editor) self.assertEqual(project_state.apps.get_model("test_runpythonatomic", "Pony").objects.count(), 0) with self.assertRaises(ValueError): with connection.schema_editor() as editor: non_atomic_migration.unapply(project_state, editor) self.assertEqual(project_state.apps.get_model("test_runpythonatomic", "Pony").objects.count(), 1) # Verify deconstruction. definition = non_atomic_migration.operations[0].deconstruct() self.assertEqual(definition[0], "RunPython") self.assertEqual(definition[1], []) self.assertEqual(sorted(definition[2]), ["atomic", "code", "reverse_code"]) def test_run_python_related_assignment(self): """ #24282 - Model changes to a FK reverse side update the model on the FK side as well. """ def inner_method(models, schema_editor): Author = models.get_model("test_authors", "Author") Book = models.get_model("test_books", "Book") author = Author.objects.create(name="Hemingway") Book.objects.create(title="Old Man and The Sea", author=author) create_author = migrations.CreateModel( "Author", [ ("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=100)), ], options={}, ) create_book = migrations.CreateModel( "Book", [ ("id", models.AutoField(primary_key=True)), ("title", models.CharField(max_length=100)), ("author", models.ForeignKey("test_authors.Author", models.CASCADE)) ], options={}, ) add_hometown = migrations.AddField( "Author", "hometown", models.CharField(max_length=100), ) create_old_man = migrations.RunPython(inner_method, inner_method) project_state = ProjectState() new_state = project_state.clone() with connection.schema_editor() as editor: create_author.state_forwards("test_authors", new_state) create_author.database_forwards("test_authors", editor, project_state, new_state) project_state = new_state new_state = new_state.clone() with connection.schema_editor() as editor: create_book.state_forwards("test_books", new_state) create_book.database_forwards("test_books", editor, project_state, new_state) project_state = new_state new_state = new_state.clone() with connection.schema_editor() as editor: add_hometown.state_forwards("test_authors", new_state) add_hometown.database_forwards("test_authors", editor, project_state, new_state) project_state = new_state new_state = new_state.clone() with connection.schema_editor() as editor: create_old_man.state_forwards("test_books", new_state) create_old_man.database_forwards("test_books", editor, project_state, new_state) def test_model_with_bigautofield(self): """ A model with BigAutoField can be created. 
""" def create_data(models, schema_editor): Author = models.get_model("test_author", "Author") Book = models.get_model("test_book", "Book") author1 = Author.objects.create(name="Hemingway") Book.objects.create(title="Old Man and The Sea", author=author1) Book.objects.create(id=2 ** 33, title="A farewell to arms", author=author1) author2 = Author.objects.create(id=2 ** 33, name="Remarque") Book.objects.create(title="All quiet on the western front", author=author2) Book.objects.create(title="Arc de Triomphe", author=author2) create_author = migrations.CreateModel( "Author", [ ("id", models.BigAutoField(primary_key=True)), ("name", models.CharField(max_length=100)), ], options={}, ) create_book = migrations.CreateModel( "Book", [ ("id", models.BigAutoField(primary_key=True)), ("title", models.CharField(max_length=100)), ("author", models.ForeignKey(to="test_author.Author", on_delete=models.CASCADE)) ], options={}, ) fill_data = migrations.RunPython(create_data) project_state = ProjectState() new_state = project_state.clone() with connection.schema_editor() as editor: create_author.state_forwards("test_author", new_state) create_author.database_forwards("test_author", editor, project_state, new_state) project_state = new_state new_state = new_state.clone() with connection.schema_editor() as editor: create_book.state_forwards("test_book", new_state) create_book.database_forwards("test_book", editor, project_state, new_state) project_state = new_state new_state = new_state.clone() with connection.schema_editor() as editor: fill_data.state_forwards("fill_data", new_state) fill_data.database_forwards("fill_data", editor, project_state, new_state) def _test_autofield_foreignfield_growth(self, source_field, target_field, target_value): """ A field may be migrated in the following ways: - AutoField to BigAutoField - SmallAutoField to AutoField - SmallAutoField to BigAutoField """ def create_initial_data(models, schema_editor): Article = models.get_model("test_article", "Article") Blog = models.get_model("test_blog", "Blog") blog = Blog.objects.create(name="web development done right") Article.objects.create(name="Frameworks", blog=blog) Article.objects.create(name="Programming Languages", blog=blog) def create_big_data(models, schema_editor): Article = models.get_model("test_article", "Article") Blog = models.get_model("test_blog", "Blog") blog2 = Blog.objects.create(name="Frameworks", id=target_value) Article.objects.create(name="Django", blog=blog2) Article.objects.create(id=target_value, name="Django2", blog=blog2) create_blog = migrations.CreateModel( "Blog", [ ("id", source_field(primary_key=True)), ("name", models.CharField(max_length=100)), ], options={}, ) create_article = migrations.CreateModel( "Article", [ ("id", source_field(primary_key=True)), ("blog", models.ForeignKey(to="test_blog.Blog", on_delete=models.CASCADE)), ("name", models.CharField(max_length=100)), ("data", models.TextField(default="")), ], options={}, ) fill_initial_data = migrations.RunPython(create_initial_data, create_initial_data) fill_big_data = migrations.RunPython(create_big_data, create_big_data) grow_article_id = migrations.AlterField('Article', 'id', target_field(primary_key=True)) grow_blog_id = migrations.AlterField('Blog', 'id', target_field(primary_key=True)) project_state = ProjectState() new_state = project_state.clone() with connection.schema_editor() as editor: create_blog.state_forwards("test_blog", new_state) create_blog.database_forwards("test_blog", editor, project_state, new_state) project_state = 
new_state new_state = new_state.clone() with connection.schema_editor() as editor: create_article.state_forwards("test_article", new_state) create_article.database_forwards("test_article", editor, project_state, new_state) project_state = new_state new_state = new_state.clone() with connection.schema_editor() as editor: fill_initial_data.state_forwards("fill_initial_data", new_state) fill_initial_data.database_forwards("fill_initial_data", editor, project_state, new_state) project_state = new_state new_state = new_state.clone() with connection.schema_editor() as editor: grow_article_id.state_forwards("test_article", new_state) grow_article_id.database_forwards("test_article", editor, project_state, new_state) state = new_state.clone() article = state.apps.get_model("test_article.Article") self.assertIsInstance(article._meta.pk, target_field) project_state = new_state new_state = new_state.clone() with connection.schema_editor() as editor: grow_blog_id.state_forwards("test_blog", new_state) grow_blog_id.database_forwards("test_blog", editor, project_state, new_state) state = new_state.clone() blog = state.apps.get_model("test_blog.Blog") self.assertIsInstance(blog._meta.pk, target_field) project_state = new_state new_state = new_state.clone() with connection.schema_editor() as editor: fill_big_data.state_forwards("fill_big_data", new_state) fill_big_data.database_forwards("fill_big_data", editor, project_state, new_state) def test_autofield__bigautofield_foreignfield_growth(self): """A field may be migrated from AutoField to BigAutoField.""" self._test_autofield_foreignfield_growth( models.AutoField, models.BigAutoField, 2 ** 33, ) def test_smallfield_autofield_foreignfield_growth(self): """A field may be migrated from SmallAutoField to AutoField.""" self._test_autofield_foreignfield_growth( models.SmallAutoField, models.AutoField, 2 ** 22, ) def test_smallfield_bigautofield_foreignfield_growth(self): """A field may be migrated from SmallAutoField to BigAutoField.""" self._test_autofield_foreignfield_growth( models.SmallAutoField, models.BigAutoField, 2 ** 33, ) def test_run_python_noop(self): """ #24098 - Tests no-op RunPython operations. """ project_state = ProjectState() new_state = project_state.clone() operation = migrations.RunPython(migrations.RunPython.noop, migrations.RunPython.noop) with connection.schema_editor() as editor: operation.database_forwards("test_runpython", editor, project_state, new_state) operation.database_backwards("test_runpython", editor, new_state, project_state) def test_separate_database_and_state(self): """ Tests the SeparateDatabaseAndState operation. 
""" project_state = self.set_up_test_model("test_separatedatabaseandstate") # Create the operation database_operation = migrations.RunSQL( "CREATE TABLE i_love_ponies (id int, special_thing int);", "DROP TABLE i_love_ponies;" ) state_operation = migrations.CreateModel("SomethingElse", [("id", models.AutoField(primary_key=True))]) operation = migrations.SeparateDatabaseAndState( state_operations=[state_operation], database_operations=[database_operation] ) self.assertEqual(operation.describe(), "Custom state/database change combination") # Test the state alteration new_state = project_state.clone() operation.state_forwards("test_separatedatabaseandstate", new_state) self.assertEqual(len(new_state.models["test_separatedatabaseandstate", "somethingelse"].fields), 1) # Make sure there's no table self.assertTableNotExists("i_love_ponies") # Test the database alteration with connection.schema_editor() as editor: operation.database_forwards("test_separatedatabaseandstate", editor, project_state, new_state) self.assertTableExists("i_love_ponies") # And test reversal self.assertTrue(operation.reversible) with connection.schema_editor() as editor: operation.database_backwards("test_separatedatabaseandstate", editor, new_state, project_state) self.assertTableNotExists("i_love_ponies") # And deconstruction definition = operation.deconstruct() self.assertEqual(definition[0], "SeparateDatabaseAndState") self.assertEqual(definition[1], []) self.assertEqual(sorted(definition[2]), ["database_operations", "state_operations"]) def test_separate_database_and_state2(self): """ A complex SeparateDatabaseAndState operation: Multiple operations both for state and database. Verify the state dependencies within each list and that state ops don't affect the database. """ app_label = "test_separatedatabaseandstate2" project_state = self.set_up_test_model(app_label) # Create the operation database_operations = [ migrations.CreateModel( "ILovePonies", [("id", models.AutoField(primary_key=True))], options={"db_table": "iloveponies"}, ), migrations.CreateModel( "ILoveMorePonies", # We use IntegerField and not AutoField because # the model is going to be deleted immediately # and with an AutoField this fails on Oracle [("id", models.IntegerField(primary_key=True))], options={"db_table": "ilovemoreponies"}, ), migrations.DeleteModel("ILoveMorePonies"), migrations.CreateModel( "ILoveEvenMorePonies", [("id", models.AutoField(primary_key=True))], options={"db_table": "iloveevenmoreponies"}, ), ] state_operations = [ migrations.CreateModel( "SomethingElse", [("id", models.AutoField(primary_key=True))], options={"db_table": "somethingelse"}, ), migrations.DeleteModel("SomethingElse"), migrations.CreateModel( "SomethingCompletelyDifferent", [("id", models.AutoField(primary_key=True))], options={"db_table": "somethingcompletelydifferent"}, ), ] operation = migrations.SeparateDatabaseAndState( state_operations=state_operations, database_operations=database_operations, ) # Test the state alteration new_state = project_state.clone() operation.state_forwards(app_label, new_state) def assertModelsAndTables(after_db): # Tables and models exist, or don't, as they should: self.assertNotIn((app_label, "somethingelse"), new_state.models) self.assertEqual(len(new_state.models[app_label, "somethingcompletelydifferent"].fields), 1) self.assertNotIn((app_label, "iloveponiesonies"), new_state.models) self.assertNotIn((app_label, "ilovemoreponies"), new_state.models) self.assertNotIn((app_label, "iloveevenmoreponies"), new_state.models) 
self.assertTableNotExists("somethingelse") self.assertTableNotExists("somethingcompletelydifferent") self.assertTableNotExists("ilovemoreponies") if after_db: self.assertTableExists("iloveponies") self.assertTableExists("iloveevenmoreponies") else: self.assertTableNotExists("iloveponies") self.assertTableNotExists("iloveevenmoreponies") assertModelsAndTables(after_db=False) # Test the database alteration with connection.schema_editor() as editor: operation.database_forwards(app_label, editor, project_state, new_state) assertModelsAndTables(after_db=True) # And test reversal self.assertTrue(operation.reversible) with connection.schema_editor() as editor: operation.database_backwards(app_label, editor, new_state, project_state) assertModelsAndTables(after_db=False) class SwappableOperationTests(OperationTestBase): """ Key operations ignore swappable models (we don't want to replicate all of them here, as the functionality is in a common base class anyway) """ available_apps = ['migrations'] @override_settings(TEST_SWAP_MODEL="migrations.SomeFakeModel") def test_create_ignore_swapped(self): """ The CreateTable operation ignores swapped models. """ operation = migrations.CreateModel( "Pony", [ ("id", models.AutoField(primary_key=True)), ("pink", models.IntegerField(default=1)), ], options={ "swappable": "TEST_SWAP_MODEL", }, ) # Test the state alteration (it should still be there!) project_state = ProjectState() new_state = project_state.clone() operation.state_forwards("test_crigsw", new_state) self.assertEqual(new_state.models["test_crigsw", "pony"].name, "Pony") self.assertEqual(len(new_state.models["test_crigsw", "pony"].fields), 2) # Test the database alteration self.assertTableNotExists("test_crigsw_pony") with connection.schema_editor() as editor: operation.database_forwards("test_crigsw", editor, project_state, new_state) self.assertTableNotExists("test_crigsw_pony") # And test reversal with connection.schema_editor() as editor: operation.database_backwards("test_crigsw", editor, new_state, project_state) self.assertTableNotExists("test_crigsw_pony") @override_settings(TEST_SWAP_MODEL="migrations.SomeFakeModel") def test_delete_ignore_swapped(self): """ Tests the DeleteModel operation ignores swapped models. """ operation = migrations.DeleteModel("Pony") project_state, new_state = self.make_test_state("test_dligsw", operation) # Test the database alteration self.assertTableNotExists("test_dligsw_pony") with connection.schema_editor() as editor: operation.database_forwards("test_dligsw", editor, project_state, new_state) self.assertTableNotExists("test_dligsw_pony") # And test reversal with connection.schema_editor() as editor: operation.database_backwards("test_dligsw", editor, new_state, project_state) self.assertTableNotExists("test_dligsw_pony") @override_settings(TEST_SWAP_MODEL="migrations.SomeFakeModel") def test_add_field_ignore_swapped(self): """ Tests the AddField operation. 
""" # Test the state alteration operation = migrations.AddField( "Pony", "height", models.FloatField(null=True, default=5), ) project_state, new_state = self.make_test_state("test_adfligsw", operation) # Test the database alteration self.assertTableNotExists("test_adfligsw_pony") with connection.schema_editor() as editor: operation.database_forwards("test_adfligsw", editor, project_state, new_state) self.assertTableNotExists("test_adfligsw_pony") # And test reversal with connection.schema_editor() as editor: operation.database_backwards("test_adfligsw", editor, new_state, project_state) self.assertTableNotExists("test_adfligsw_pony") @override_settings(TEST_SWAP_MODEL='migrations.SomeFakeModel') def test_indexes_ignore_swapped(self): """ Add/RemoveIndex operations ignore swapped models. """ operation = migrations.AddIndex('Pony', models.Index(fields=['pink'], name='my_name_idx')) project_state, new_state = self.make_test_state('test_adinigsw', operation) with connection.schema_editor() as editor: # No database queries should be run for swapped models operation.database_forwards('test_adinigsw', editor, project_state, new_state) operation.database_backwards('test_adinigsw', editor, new_state, project_state) operation = migrations.RemoveIndex('Pony', models.Index(fields=['pink'], name='my_name_idx')) project_state, new_state = self.make_test_state("test_rminigsw", operation) with connection.schema_editor() as editor: operation.database_forwards('test_rminigsw', editor, project_state, new_state) operation.database_backwards('test_rminigsw', editor, new_state, project_state) class TestCreateModel(SimpleTestCase): def test_references_model_mixin(self): CreateModel('name', [], bases=(Mixin, models.Model)).references_model('other_model') class FieldOperationTests(SimpleTestCase): def test_references_model(self): operation = FieldOperation('MoDel', 'field', models.ForeignKey('Other', models.CASCADE)) # Model name match. self.assertIs(operation.references_model('mOdEl'), True) # Referenced field. self.assertIs(operation.references_model('oTher'), True) # Doesn't reference. 
        self.assertIs(operation.references_model('Whatever'), False)

    def test_references_field_by_name(self):
        operation = FieldOperation('MoDel', 'field', models.BooleanField(default=False))
        self.assertIs(operation.references_field('model', 'field'), True)

    def test_references_field_by_remote_field_model(self):
        operation = FieldOperation('Model', 'field', models.ForeignKey('Other', models.CASCADE))
        self.assertIs(operation.references_field('Other', 'whatever'), True)
        self.assertIs(operation.references_field('Missing', 'whatever'), False)

    def test_references_field_by_from_fields(self):
        operation = FieldOperation(
            'Model', 'field', models.fields.related.ForeignObject('Other', models.CASCADE, ['from'], ['to'])
        )
        self.assertIs(operation.references_field('Model', 'from'), True)
        self.assertIs(operation.references_field('Model', 'to'), False)
        self.assertIs(operation.references_field('Other', 'from'), False)

    def test_references_field_by_to_fields(self):
        operation = FieldOperation('Model', 'field', models.ForeignKey('Other', models.CASCADE, to_field='field'))
        self.assertIs(operation.references_field('Other', 'field'), True)
        self.assertIs(operation.references_field('Other', 'whatever'), False)
        self.assertIs(operation.references_field('Missing', 'whatever'), False)

    def test_references_field_by_through(self):
        operation = FieldOperation('Model', 'field', models.ManyToManyField('Other', through='Through'))
        self.assertIs(operation.references_field('Other', 'whatever'), True)
        self.assertIs(operation.references_field('Through', 'whatever'), True)
        self.assertIs(operation.references_field('Missing', 'whatever'), False)

    def test_references_field_by_through_fields(self):
        operation = FieldOperation(
            'Model', 'field', models.ManyToManyField('Other', through='Through', through_fields=('first', 'second'))
        )
        self.assertIs(operation.references_field('Other', 'whatever'), True)
        self.assertIs(operation.references_field('Through', 'whatever'), False)
        self.assertIs(operation.references_field('Through', 'first'), True)
        self.assertIs(operation.references_field('Through', 'second'), True)
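
# Illustrative sketch (not part of the test suite above): the shape of a
# hand-written migration using SeparateDatabaseAndState, the operation
# exercised by the tests in this module. The app label, dependency, and
# table name below are hypothetical.
from django.db import migrations, models


class ExampleSeparateDatabaseAndStateMigration(migrations.Migration):

    dependencies = [('myapp', '0001_initial')]  # hypothetical app/migration

    operations = [
        migrations.SeparateDatabaseAndState(
            # Run against the database only; the project state is untouched.
            database_operations=[
                migrations.RunSQL(
                    "CREATE TABLE myapp_pony (id int);",
                    "DROP TABLE myapp_pony;",
                ),
            ],
            # Applied to the in-memory project state only; no SQL is run.
            state_operations=[
                migrations.CreateModel(
                    'Pony',
                    [('id', models.AutoField(primary_key=True))],
                    options={'db_table': 'myapp_pony'},
                ),
            ],
        ),
    ]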
import os import sys import unittest from types import ModuleType, SimpleNamespace from unittest import mock from django.conf import ENVIRONMENT_VARIABLE, LazySettings, Settings, settings from django.core.exceptions import ImproperlyConfigured from django.http import HttpRequest from django.test import ( SimpleTestCase, TestCase, TransactionTestCase, modify_settings, override_settings, signals, ) from django.test.utils import requires_tz_support @modify_settings(ITEMS={ 'prepend': ['b'], 'append': ['d'], 'remove': ['a', 'e'] }) @override_settings(ITEMS=['a', 'c', 'e'], ITEMS_OUTER=[1, 2, 3], TEST='override', TEST_OUTER='outer') class FullyDecoratedTranTestCase(TransactionTestCase): available_apps = [] def test_override(self): self.assertEqual(settings.ITEMS, ['b', 'c', 'd']) self.assertEqual(settings.ITEMS_OUTER, [1, 2, 3]) self.assertEqual(settings.TEST, 'override') self.assertEqual(settings.TEST_OUTER, 'outer') @modify_settings(ITEMS={ 'append': ['e', 'f'], 'prepend': ['a'], 'remove': ['d', 'c'], }) def test_method_list_override(self): self.assertEqual(settings.ITEMS, ['a', 'b', 'e', 'f']) self.assertEqual(settings.ITEMS_OUTER, [1, 2, 3]) @modify_settings(ITEMS={ 'append': ['b'], 'prepend': ['d'], 'remove': ['a', 'c', 'e'], }) def test_method_list_override_no_ops(self): self.assertEqual(settings.ITEMS, ['b', 'd']) @modify_settings(ITEMS={ 'append': 'e', 'prepend': 'a', 'remove': 'c', }) def test_method_list_override_strings(self): self.assertEqual(settings.ITEMS, ['a', 'b', 'd', 'e']) @modify_settings(ITEMS={'remove': ['b', 'd']}) @modify_settings(ITEMS={'append': ['b'], 'prepend': ['d']}) def test_method_list_override_nested_order(self): self.assertEqual(settings.ITEMS, ['d', 'c', 'b']) @override_settings(TEST='override2') def test_method_override(self): self.assertEqual(settings.TEST, 'override2') self.assertEqual(settings.TEST_OUTER, 'outer') def test_decorated_testcase_name(self): self.assertEqual(FullyDecoratedTranTestCase.__name__, 'FullyDecoratedTranTestCase') def test_decorated_testcase_module(self): self.assertEqual(FullyDecoratedTranTestCase.__module__, __name__) @modify_settings(ITEMS={ 'prepend': ['b'], 'append': ['d'], 'remove': ['a', 'e'] }) @override_settings(ITEMS=['a', 'c', 'e'], TEST='override') class FullyDecoratedTestCase(TestCase): def test_override(self): self.assertEqual(settings.ITEMS, ['b', 'c', 'd']) self.assertEqual(settings.TEST, 'override') @modify_settings(ITEMS={ 'append': 'e', 'prepend': 'a', 'remove': 'c', }) @override_settings(TEST='override2') def test_method_override(self): self.assertEqual(settings.ITEMS, ['a', 'b', 'd', 'e']) self.assertEqual(settings.TEST, 'override2') class ClassDecoratedTestCaseSuper(TestCase): """ Dummy class for testing max recursion error in child class call to super(). Refs #17011. """ def test_max_recursion_error(self): pass @override_settings(TEST='override') class ClassDecoratedTestCase(ClassDecoratedTestCaseSuper): @classmethod def setUpClass(cls): super().setUpClass() cls.foo = getattr(settings, 'TEST', 'BUG') def test_override(self): self.assertEqual(settings.TEST, 'override') def test_setupclass_override(self): """Settings are overridden within setUpClass (#21281).""" self.assertEqual(self.foo, 'override') @override_settings(TEST='override2') def test_method_override(self): self.assertEqual(settings.TEST, 'override2') def test_max_recursion_error(self): """ Overriding a method on a super class and then calling that method on the super class should not trigger infinite recursion. See #17011. 
""" super().test_max_recursion_error() @modify_settings(ITEMS={'append': 'mother'}) @override_settings(ITEMS=['father'], TEST='override-parent') class ParentDecoratedTestCase(TestCase): pass @modify_settings(ITEMS={'append': ['child']}) @override_settings(TEST='override-child') class ChildDecoratedTestCase(ParentDecoratedTestCase): def test_override_settings_inheritance(self): self.assertEqual(settings.ITEMS, ['father', 'mother', 'child']) self.assertEqual(settings.TEST, 'override-child') class SettingsTests(SimpleTestCase): def setUp(self): self.testvalue = None signals.setting_changed.connect(self.signal_callback) def tearDown(self): signals.setting_changed.disconnect(self.signal_callback) def signal_callback(self, sender, setting, value, **kwargs): if setting == 'TEST': self.testvalue = value def test_override(self): settings.TEST = 'test' self.assertEqual('test', settings.TEST) with self.settings(TEST='override'): self.assertEqual('override', settings.TEST) self.assertEqual('test', settings.TEST) del settings.TEST def test_override_change(self): settings.TEST = 'test' self.assertEqual('test', settings.TEST) with self.settings(TEST='override'): self.assertEqual('override', settings.TEST) settings.TEST = 'test2' self.assertEqual('test', settings.TEST) del settings.TEST def test_override_doesnt_leak(self): with self.assertRaises(AttributeError): getattr(settings, 'TEST') with self.settings(TEST='override'): self.assertEqual('override', settings.TEST) settings.TEST = 'test' with self.assertRaises(AttributeError): getattr(settings, 'TEST') @override_settings(TEST='override') def test_decorator(self): self.assertEqual('override', settings.TEST) def test_context_manager(self): with self.assertRaises(AttributeError): getattr(settings, 'TEST') override = override_settings(TEST='override') with self.assertRaises(AttributeError): getattr(settings, 'TEST') override.enable() self.assertEqual('override', settings.TEST) override.disable() with self.assertRaises(AttributeError): getattr(settings, 'TEST') def test_class_decorator(self): # SimpleTestCase can be decorated by override_settings, but not ut.TestCase class SimpleTestCaseSubclass(SimpleTestCase): pass class UnittestTestCaseSubclass(unittest.TestCase): pass decorated = override_settings(TEST='override')(SimpleTestCaseSubclass) self.assertIsInstance(decorated, type) self.assertTrue(issubclass(decorated, SimpleTestCase)) with self.assertRaisesMessage(Exception, "Only subclasses of Django SimpleTestCase"): decorated = override_settings(TEST='override')(UnittestTestCaseSubclass) def test_signal_callback_context_manager(self): with self.assertRaises(AttributeError): getattr(settings, 'TEST') with self.settings(TEST='override'): self.assertEqual(self.testvalue, 'override') self.assertIsNone(self.testvalue) @override_settings(TEST='override') def test_signal_callback_decorator(self): self.assertEqual(self.testvalue, 'override') # # Regression tests for #10130: deleting settings. 
# def test_settings_delete(self): settings.TEST = 'test' self.assertEqual('test', settings.TEST) del settings.TEST msg = "'Settings' object has no attribute 'TEST'" with self.assertRaisesMessage(AttributeError, msg): getattr(settings, 'TEST') def test_settings_delete_wrapped(self): with self.assertRaisesMessage(TypeError, "can't delete _wrapped."): delattr(settings, '_wrapped') def test_override_settings_delete(self): """ Allow deletion of a setting in an overridden settings set (#18824) """ previous_i18n = settings.USE_I18N previous_l10n = settings.USE_L10N with self.settings(USE_I18N=False): del settings.USE_I18N with self.assertRaises(AttributeError): getattr(settings, 'USE_I18N') # Should also work for a non-overridden setting del settings.USE_L10N with self.assertRaises(AttributeError): getattr(settings, 'USE_L10N') self.assertNotIn('USE_I18N', dir(settings)) self.assertNotIn('USE_L10N', dir(settings)) self.assertEqual(settings.USE_I18N, previous_i18n) self.assertEqual(settings.USE_L10N, previous_l10n) def test_override_settings_nested(self): """ override_settings uses the actual _wrapped attribute at runtime, not when it was instantiated. """ with self.assertRaises(AttributeError): getattr(settings, 'TEST') with self.assertRaises(AttributeError): getattr(settings, 'TEST2') inner = override_settings(TEST2='override') with override_settings(TEST='override'): self.assertEqual('override', settings.TEST) with inner: self.assertEqual('override', settings.TEST) self.assertEqual('override', settings.TEST2) # inner's __exit__ should have restored the settings of the outer # context manager, not those when the class was instantiated self.assertEqual('override', settings.TEST) with self.assertRaises(AttributeError): getattr(settings, 'TEST2') with self.assertRaises(AttributeError): getattr(settings, 'TEST') with self.assertRaises(AttributeError): getattr(settings, 'TEST2') def test_no_secret_key(self): settings_module = ModuleType('fake_settings_module') sys.modules['fake_settings_module'] = settings_module msg = 'The SECRET_KEY setting must not be empty.' try: with self.assertRaisesMessage(ImproperlyConfigured, msg): Settings('fake_settings_module') finally: del sys.modules['fake_settings_module'] def test_no_settings_module(self): msg = ( 'Requested setting%s, but settings are not configured. You ' 'must either define the environment variable DJANGO_SETTINGS_MODULE ' 'or call settings.configure() before accessing settings.' 
) orig_settings = os.environ[ENVIRONMENT_VARIABLE] os.environ[ENVIRONMENT_VARIABLE] = '' try: with self.assertRaisesMessage(ImproperlyConfigured, msg % 's'): settings._setup() with self.assertRaisesMessage(ImproperlyConfigured, msg % ' TEST'): settings._setup('TEST') finally: os.environ[ENVIRONMENT_VARIABLE] = orig_settings def test_already_configured(self): with self.assertRaisesMessage(RuntimeError, 'Settings already configured.'): settings.configure() def test_nonupper_settings_prohibited_in_configure(self): s = LazySettings() with self.assertRaisesMessage(TypeError, "Setting 'foo' must be uppercase."): s.configure(foo='bar') def test_nonupper_settings_ignored_in_default_settings(self): s = LazySettings() s.configure(SimpleNamespace(foo='bar')) with self.assertRaises(AttributeError): getattr(s, 'foo') @requires_tz_support @mock.patch('django.conf.global_settings.TIME_ZONE', 'test') def test_incorrect_timezone(self): with self.assertRaisesMessage(ValueError, 'Incorrect timezone setting: test'): settings._setup() class TestComplexSettingOverride(SimpleTestCase): def setUp(self): self.old_warn_override_settings = signals.COMPLEX_OVERRIDE_SETTINGS.copy() signals.COMPLEX_OVERRIDE_SETTINGS.add('TEST_WARN') def tearDown(self): signals.COMPLEX_OVERRIDE_SETTINGS = self.old_warn_override_settings self.assertNotIn('TEST_WARN', signals.COMPLEX_OVERRIDE_SETTINGS) def test_complex_override_warning(self): """Regression test for #19031""" msg = 'Overriding setting TEST_WARN can lead to unexpected behavior.' with self.assertWarnsMessage(UserWarning, msg) as cm: with override_settings(TEST_WARN='override'): self.assertEqual(settings.TEST_WARN, 'override') self.assertEqual(cm.filename, __file__) class SecureProxySslHeaderTest(SimpleTestCase): @override_settings(SECURE_PROXY_SSL_HEADER=None) def test_none(self): req = HttpRequest() self.assertIs(req.is_secure(), False) @override_settings(SECURE_PROXY_SSL_HEADER=('HTTP_X_FORWARDED_PROTO', 'https')) def test_set_without_xheader(self): req = HttpRequest() self.assertIs(req.is_secure(), False) @override_settings(SECURE_PROXY_SSL_HEADER=('HTTP_X_FORWARDED_PROTO', 'https')) def test_set_with_xheader_wrong(self): req = HttpRequest() req.META['HTTP_X_FORWARDED_PROTO'] = 'wrongvalue' self.assertIs(req.is_secure(), False) @override_settings(SECURE_PROXY_SSL_HEADER=('HTTP_X_FORWARDED_PROTO', 'https')) def test_set_with_xheader_right(self): req = HttpRequest() req.META['HTTP_X_FORWARDED_PROTO'] = 'https' self.assertIs(req.is_secure(), True) @override_settings(SECURE_PROXY_SSL_HEADER=('HTTP_X_FORWARDED_PROTO', 'https')) def test_xheader_preferred_to_underlying_request(self): class ProxyRequest(HttpRequest): def _get_scheme(self): """Proxy always connecting via HTTPS""" return 'https' # Client connects via HTTP. 
        req = ProxyRequest()
        req.META['HTTP_X_FORWARDED_PROTO'] = 'http'
        self.assertIs(req.is_secure(), False)


class IsOverriddenTest(SimpleTestCase):
    def test_configure(self):
        s = LazySettings()
        s.configure(SECRET_KEY='foo')
        self.assertTrue(s.is_overridden('SECRET_KEY'))

    def test_module(self):
        settings_module = ModuleType('fake_settings_module')
        settings_module.SECRET_KEY = 'foo'
        sys.modules['fake_settings_module'] = settings_module
        try:
            s = Settings('fake_settings_module')
            self.assertTrue(s.is_overridden('SECRET_KEY'))
            self.assertFalse(s.is_overridden('ALLOWED_HOSTS'))
        finally:
            del sys.modules['fake_settings_module']

    def test_override(self):
        self.assertFalse(settings.is_overridden('ALLOWED_HOSTS'))
        with override_settings(ALLOWED_HOSTS=[]):
            self.assertTrue(settings.is_overridden('ALLOWED_HOSTS'))

    def test_unevaluated_lazysettings_repr(self):
        lazy_settings = LazySettings()
        expected = '<LazySettings [Unevaluated]>'
        self.assertEqual(repr(lazy_settings), expected)

    def test_evaluated_lazysettings_repr(self):
        lazy_settings = LazySettings()
        module = os.environ.get(ENVIRONMENT_VARIABLE)
        expected = '<LazySettings "%s">' % module
        # Force evaluation of the lazy object.
        lazy_settings.APPEND_SLASH
        self.assertEqual(repr(lazy_settings), expected)

    def test_usersettingsholder_repr(self):
        lazy_settings = LazySettings()
        lazy_settings.configure(APPEND_SLASH=False)
        expected = '<UserSettingsHolder>'
        self.assertEqual(repr(lazy_settings._wrapped), expected)

    def test_settings_repr(self):
        module = os.environ.get(ENVIRONMENT_VARIABLE)
        lazy_settings = Settings(module)
        expected = '<Settings "%s">' % module
        self.assertEqual(repr(lazy_settings), expected)


class TestListSettings(unittest.TestCase):
    """
    Make sure settings that should be lists or tuples throw
    ImproperlyConfigured if they are set to a string instead of a list or
    tuple.
    """
    list_or_tuple_settings = (
        "INSTALLED_APPS",
        "TEMPLATE_DIRS",
        "LOCALE_PATHS",
    )

    def test_tuple_settings(self):
        settings_module = ModuleType('fake_settings_module')
        settings_module.SECRET_KEY = 'foo'
        for setting in self.list_or_tuple_settings:
            setattr(settings_module, setting, 'non_list_or_tuple_value')  # a str, not a list/tuple
            sys.modules['fake_settings_module'] = settings_module
            try:
                with self.assertRaises(ImproperlyConfigured):
                    Settings('fake_settings_module')
            finally:
                del sys.modules['fake_settings_module']
                delattr(settings_module, setting)


class SettingChangeEnterException(Exception):
    pass


class SettingChangeExitException(Exception):
    pass


class OverrideSettingsIsolationOnExceptionTests(SimpleTestCase):
    """
    The override_settings context manager restores settings if one of the
    receivers of the setting_changed signal fails. Check the three cases of
    receiver failure detailed in receiver(). In each case, ALL receivers are
    called when exiting the context manager.
    """
    def setUp(self):
        signals.setting_changed.connect(self.receiver)
        self.addCleanup(signals.setting_changed.disconnect, self.receiver)
        # Create a spy that's connected to the `setting_changed` signal and
        # executed AFTER `self.receiver`.
        self.spy_receiver = mock.Mock()
        signals.setting_changed.connect(self.spy_receiver)
        self.addCleanup(signals.setting_changed.disconnect, self.spy_receiver)

    def receiver(self, **kwargs):
        """
        A receiver that fails while certain settings are being changed.
        - SETTING_BOTH raises an error while receiving the signal
          on both entering and exiting the context manager.
        - SETTING_ENTER raises an error only on enter.
        - SETTING_EXIT raises an error only on exit.
""" setting = kwargs['setting'] enter = kwargs['enter'] if setting in ('SETTING_BOTH', 'SETTING_ENTER') and enter: raise SettingChangeEnterException if setting in ('SETTING_BOTH', 'SETTING_EXIT') and not enter: raise SettingChangeExitException def check_settings(self): """Assert that settings for these tests aren't present.""" self.assertFalse(hasattr(settings, 'SETTING_BOTH')) self.assertFalse(hasattr(settings, 'SETTING_ENTER')) self.assertFalse(hasattr(settings, 'SETTING_EXIT')) self.assertFalse(hasattr(settings, 'SETTING_PASS')) def check_spy_receiver_exit_calls(self, call_count): """ Assert that `self.spy_receiver` was called exactly `call_count` times with the ``enter=False`` keyword argument. """ kwargs_with_exit = [ kwargs for args, kwargs in self.spy_receiver.call_args_list if ('enter', False) in kwargs.items() ] self.assertEqual(len(kwargs_with_exit), call_count) def test_override_settings_both(self): """Receiver fails on both enter and exit.""" with self.assertRaises(SettingChangeEnterException): with override_settings(SETTING_PASS='BOTH', SETTING_BOTH='BOTH'): pass self.check_settings() # Two settings were touched, so expect two calls of `spy_receiver`. self.check_spy_receiver_exit_calls(call_count=2) def test_override_settings_enter(self): """Receiver fails on enter only.""" with self.assertRaises(SettingChangeEnterException): with override_settings(SETTING_PASS='ENTER', SETTING_ENTER='ENTER'): pass self.check_settings() # Two settings were touched, so expect two calls of `spy_receiver`. self.check_spy_receiver_exit_calls(call_count=2) def test_override_settings_exit(self): """Receiver fails on exit only.""" with self.assertRaises(SettingChangeExitException): with override_settings(SETTING_PASS='EXIT', SETTING_EXIT='EXIT'): pass self.check_settings() # Two settings were touched, so expect two calls of `spy_receiver`. self.check_spy_receiver_exit_calls(call_count=2) def test_override_settings_reusable_on_enter(self): """ Error is raised correctly when reusing the same override_settings instance. """ @override_settings(SETTING_ENTER='ENTER') def decorated_function(): pass with self.assertRaises(SettingChangeEnterException): decorated_function() signals.setting_changed.disconnect(self.receiver) # This call shouldn't raise any errors. decorated_function()
import os
import shutil
import sys
import tempfile
import threading
import time
import unittest
from datetime import datetime, timedelta
from io import StringIO
from urllib.request import urlopen

from django.core.cache import cache
from django.core.exceptions import SuspiciousFileOperation
from django.core.files.base import ContentFile, File
from django.core.files.storage import FileSystemStorage, get_storage_class
from django.core.files.uploadedfile import (
    InMemoryUploadedFile, SimpleUploadedFile, TemporaryUploadedFile,
)
from django.db.models.fields.files import FileDescriptor
from django.test import (
    LiveServerTestCase, SimpleTestCase, TestCase, override_settings,
)
from django.test.utils import requires_tz_support
from django.urls import NoReverseMatch, reverse_lazy
from django.utils import timezone

from .models import Storage, temp_storage, temp_storage_location

FILE_SUFFIX_REGEX = '[A-Za-z0-9]{7}'


class GetStorageClassTests(SimpleTestCase):

    def test_get_filesystem_storage(self):
        """
        get_storage_class returns the class for a storage backend name/path.
        """
        self.assertEqual(
            get_storage_class('django.core.files.storage.FileSystemStorage'),
            FileSystemStorage)

    def test_get_invalid_storage_module(self):
        """
        get_storage_class raises an error if the requested import doesn't exist.
        """
        with self.assertRaisesMessage(ImportError, "No module named 'storage'"):
            get_storage_class('storage.NonexistentStorage')

    def test_get_nonexistent_storage_class(self):
        """
        get_storage_class raises an error if the requested class doesn't exist.
        """
        with self.assertRaises(ImportError):
            get_storage_class('django.core.files.storage.NonexistentStorage')

    def test_get_nonexistent_storage_module(self):
        """
        get_storage_class raises an error if the requested module doesn't exist.
        """
        with self.assertRaisesMessage(ImportError, "No module named 'django.core.files.nonexistent_storage'"):
            get_storage_class('django.core.files.nonexistent_storage.NonexistentStorage')


class FileSystemStorageTests(unittest.TestCase):

    def test_deconstruction(self):
        path, args, kwargs = temp_storage.deconstruct()
        self.assertEqual(path, "django.core.files.storage.FileSystemStorage")
        self.assertEqual(args, ())
        self.assertEqual(kwargs, {'location': temp_storage_location})

        kwargs_orig = {
            'location': temp_storage_location,
            'base_url': 'http://myfiles.example.com/'
        }
        storage = FileSystemStorage(**kwargs_orig)
        path, args, kwargs = storage.deconstruct()
        self.assertEqual(kwargs, kwargs_orig)

    def test_lazy_base_url_init(self):
        """
        FileSystemStorage.__init__() shouldn't evaluate base_url.
        """
        storage = FileSystemStorage(base_url=reverse_lazy('app:url'))
        with self.assertRaises(NoReverseMatch):
            storage.url(storage.base_url)


class FileStorageTests(SimpleTestCase):
    storage_class = FileSystemStorage

    def setUp(self):
        self.temp_dir = tempfile.mkdtemp()
        self.storage = self.storage_class(location=self.temp_dir, base_url='/test_media_url/')
        # Set up a second temporary directory which is ensured to have a mixed
        # case name.
        self.temp_dir2 = tempfile.mkdtemp(suffix='aBc')

    def tearDown(self):
        shutil.rmtree(self.temp_dir)
        shutil.rmtree(self.temp_dir2)

    def test_empty_location(self):
        """
        An empty location falls back to the current working directory.
        """
        storage = self.storage_class(location='')
        self.assertEqual(storage.base_location, '')
        self.assertEqual(storage.location, os.getcwd())

    def test_file_access_options(self):
        """
        Standard file access options are available, and work as expected.
""" self.assertFalse(self.storage.exists('storage_test')) f = self.storage.open('storage_test', 'w') f.write('storage contents') f.close() self.assertTrue(self.storage.exists('storage_test')) f = self.storage.open('storage_test', 'r') self.assertEqual(f.read(), 'storage contents') f.close() self.storage.delete('storage_test') self.assertFalse(self.storage.exists('storage_test')) def _test_file_time_getter(self, getter): # Check for correct behavior under both USE_TZ=True and USE_TZ=False. # The tests are similar since they both set up a situation where the # system time zone, Django's TIME_ZONE, and UTC are distinct. self._test_file_time_getter_tz_handling_on(getter) self._test_file_time_getter_tz_handling_off(getter) @override_settings(USE_TZ=True, TIME_ZONE='Africa/Algiers') def _test_file_time_getter_tz_handling_on(self, getter): # Django's TZ (and hence the system TZ) is set to Africa/Algiers which # is UTC+1 and has no DST change. We can set the Django TZ to something # else so that UTC, Django's TIME_ZONE, and the system timezone are all # different. now_in_algiers = timezone.make_aware(datetime.now()) with timezone.override(timezone.get_fixed_timezone(-300)): # At this point the system TZ is +1 and the Django TZ # is -5. The following will be aware in UTC. now = timezone.now() self.assertFalse(self.storage.exists('test.file.tz.on')) f = ContentFile('custom contents') f_name = self.storage.save('test.file.tz.on', f) self.addCleanup(self.storage.delete, f_name) dt = getter(f_name) # dt should be aware, in UTC self.assertTrue(timezone.is_aware(dt)) self.assertEqual(now.tzname(), dt.tzname()) # The three timezones are indeed distinct. naive_now = datetime.now() algiers_offset = now_in_algiers.tzinfo.utcoffset(naive_now) django_offset = timezone.get_current_timezone().utcoffset(naive_now) utc_offset = timezone.utc.utcoffset(naive_now) self.assertGreater(algiers_offset, utc_offset) self.assertLess(django_offset, utc_offset) # dt and now should be the same effective time. self.assertLess(abs(dt - now), timedelta(seconds=2)) @override_settings(USE_TZ=False, TIME_ZONE='Africa/Algiers') def _test_file_time_getter_tz_handling_off(self, getter): # Django's TZ (and hence the system TZ) is set to Africa/Algiers which # is UTC+1 and has no DST change. We can set the Django TZ to something # else so that UTC, Django's TIME_ZONE, and the system timezone are all # different. now_in_algiers = timezone.make_aware(datetime.now()) with timezone.override(timezone.get_fixed_timezone(-300)): # At this point the system TZ is +1 and the Django TZ # is -5. self.assertFalse(self.storage.exists('test.file.tz.off')) f = ContentFile('custom contents') f_name = self.storage.save('test.file.tz.off', f) self.addCleanup(self.storage.delete, f_name) dt = getter(f_name) # dt should be naive, in system (+1) TZ self.assertTrue(timezone.is_naive(dt)) # The three timezones are indeed distinct. naive_now = datetime.now() algiers_offset = now_in_algiers.tzinfo.utcoffset(naive_now) django_offset = timezone.get_current_timezone().utcoffset(naive_now) utc_offset = timezone.utc.utcoffset(naive_now) self.assertGreater(algiers_offset, utc_offset) self.assertLess(django_offset, utc_offset) # dt and naive_now should be the same effective time. self.assertLess(abs(dt - naive_now), timedelta(seconds=2)) # If we convert dt to an aware object using the Algiers # timezone then it should be the same effective time to # now_in_algiers. 
_dt = timezone.make_aware(dt, now_in_algiers.tzinfo) self.assertLess(abs(_dt - now_in_algiers), timedelta(seconds=2)) def test_file_get_accessed_time(self): """ File storage returns a Datetime object for the last accessed time of a file. """ self.assertFalse(self.storage.exists('test.file')) f = ContentFile('custom contents') f_name = self.storage.save('test.file', f) self.addCleanup(self.storage.delete, f_name) atime = self.storage.get_accessed_time(f_name) self.assertEqual(atime, datetime.fromtimestamp(os.path.getatime(self.storage.path(f_name)))) self.assertLess(timezone.now() - self.storage.get_accessed_time(f_name), timedelta(seconds=2)) @requires_tz_support def test_file_get_accessed_time_timezone(self): self._test_file_time_getter(self.storage.get_accessed_time) def test_file_get_created_time(self): """ File storage returns a datetime for the creation time of a file. """ self.assertFalse(self.storage.exists('test.file')) f = ContentFile('custom contents') f_name = self.storage.save('test.file', f) self.addCleanup(self.storage.delete, f_name) ctime = self.storage.get_created_time(f_name) self.assertEqual(ctime, datetime.fromtimestamp(os.path.getctime(self.storage.path(f_name)))) self.assertLess(timezone.now() - self.storage.get_created_time(f_name), timedelta(seconds=2)) @requires_tz_support def test_file_get_created_time_timezone(self): self._test_file_time_getter(self.storage.get_created_time) def test_file_get_modified_time(self): """ File storage returns a datetime for the last modified time of a file. """ self.assertFalse(self.storage.exists('test.file')) f = ContentFile('custom contents') f_name = self.storage.save('test.file', f) self.addCleanup(self.storage.delete, f_name) mtime = self.storage.get_modified_time(f_name) self.assertEqual(mtime, datetime.fromtimestamp(os.path.getmtime(self.storage.path(f_name)))) self.assertLess(timezone.now() - self.storage.get_modified_time(f_name), timedelta(seconds=2)) @requires_tz_support def test_file_get_modified_time_timezone(self): self._test_file_time_getter(self.storage.get_modified_time) def test_file_save_without_name(self): """ File storage extracts the filename from the content object if no name is given explicitly. """ self.assertFalse(self.storage.exists('test.file')) f = ContentFile('custom contents') f.name = 'test.file' storage_f_name = self.storage.save(None, f) self.assertEqual(storage_f_name, f.name) self.assertTrue(os.path.exists(os.path.join(self.temp_dir, f.name))) self.storage.delete(storage_f_name) def test_file_save_with_path(self): """ Saving a pathname should create intermediate directories as necessary. 
""" self.assertFalse(self.storage.exists('path/to')) self.storage.save('path/to/test.file', ContentFile('file saved with path')) self.assertTrue(self.storage.exists('path/to')) with self.storage.open('path/to/test.file') as f: self.assertEqual(f.read(), b'file saved with path') self.assertTrue(os.path.exists( os.path.join(self.temp_dir, 'path', 'to', 'test.file'))) self.storage.delete('path/to/test.file') def test_save_doesnt_close(self): with TemporaryUploadedFile('test', 'text/plain', 1, 'utf8') as file: file.write(b'1') file.seek(0) self.assertFalse(file.closed) self.storage.save('path/to/test.file', file) self.assertFalse(file.closed) self.assertFalse(file.file.closed) file = InMemoryUploadedFile(StringIO('1'), '', 'test', 'text/plain', 1, 'utf8') with file: self.assertFalse(file.closed) self.storage.save('path/to/test.file', file) self.assertFalse(file.closed) self.assertFalse(file.file.closed) def test_file_path(self): """ File storage returns the full path of a file """ self.assertFalse(self.storage.exists('test.file')) f = ContentFile('custom contents') f_name = self.storage.save('test.file', f) self.assertEqual(self.storage.path(f_name), os.path.join(self.temp_dir, f_name)) self.storage.delete(f_name) def test_file_url(self): """ File storage returns a url to access a given file from the Web. """ self.assertEqual(self.storage.url('test.file'), self.storage.base_url + 'test.file') # should encode special chars except ~!*()' # like encodeURIComponent() JavaScript function do self.assertEqual( self.storage.url(r"~!*()'@#$%^&*abc`+ =.file"), "/test_media_url/~!*()'%40%23%24%25%5E%26*abc%60%2B%20%3D.file" ) self.assertEqual(self.storage.url("ab\0c"), "/test_media_url/ab%00c") # should translate os path separator(s) to the url path separator self.assertEqual(self.storage.url("""a/b\\c.file"""), "/test_media_url/a/b/c.file") # #25905: remove leading slashes from file names to prevent unsafe url output self.assertEqual(self.storage.url("/evil.com"), "/test_media_url/evil.com") self.assertEqual(self.storage.url(r"\evil.com"), "/test_media_url/evil.com") self.assertEqual(self.storage.url("///evil.com"), "/test_media_url/evil.com") self.assertEqual(self.storage.url(r"\\\evil.com"), "/test_media_url/evil.com") self.assertEqual(self.storage.url(None), "/test_media_url/") def test_base_url(self): """ File storage returns a url even when its base_url is unset or modified. """ self.storage.base_url = None with self.assertRaises(ValueError): self.storage.url('test.file') # #22717: missing ending slash in base_url should be auto-corrected storage = self.storage_class(location=self.temp_dir, base_url='/no_ending_slash') self.assertEqual( storage.url('test.file'), '%s%s' % (storage.base_url, 'test.file') ) def test_listdir(self): """ File storage returns a tuple containing directories and files. 
""" self.assertFalse(self.storage.exists('storage_test_1')) self.assertFalse(self.storage.exists('storage_test_2')) self.assertFalse(self.storage.exists('storage_dir_1')) self.storage.save('storage_test_1', ContentFile('custom content')) self.storage.save('storage_test_2', ContentFile('custom content')) os.mkdir(os.path.join(self.temp_dir, 'storage_dir_1')) dirs, files = self.storage.listdir('') self.assertEqual(set(dirs), {'storage_dir_1'}) self.assertEqual(set(files), {'storage_test_1', 'storage_test_2'}) self.storage.delete('storage_test_1') self.storage.delete('storage_test_2') os.rmdir(os.path.join(self.temp_dir, 'storage_dir_1')) def test_file_storage_prevents_directory_traversal(self): """ File storage prevents directory traversal (files can only be accessed if they're below the storage location). """ with self.assertRaises(SuspiciousFileOperation): self.storage.exists('..') with self.assertRaises(SuspiciousFileOperation): self.storage.exists('/etc/passwd') def test_file_storage_preserves_filename_case(self): """The storage backend should preserve case of filenames.""" # Create a storage backend associated with the mixed case name # directory. other_temp_storage = self.storage_class(location=self.temp_dir2) # Ask that storage backend to store a file with a mixed case filename. mixed_case = 'CaSe_SeNsItIvE' file = other_temp_storage.open(mixed_case, 'w') file.write('storage contents') file.close() self.assertEqual(os.path.join(self.temp_dir2, mixed_case), other_temp_storage.path(mixed_case)) other_temp_storage.delete(mixed_case) def test_makedirs_race_handling(self): """ File storage should be robust against directory creation race conditions. """ real_makedirs = os.makedirs # Monkey-patch os.makedirs, to simulate a normal call, a raced call, # and an error. def fake_makedirs(path, mode=0o777, exist_ok=False): if path == os.path.join(self.temp_dir, 'normal'): real_makedirs(path, mode, exist_ok) elif path == os.path.join(self.temp_dir, 'raced'): real_makedirs(path, mode, exist_ok) if not exist_ok: raise FileExistsError() elif path == os.path.join(self.temp_dir, 'error'): raise PermissionError() else: self.fail('unexpected argument %r' % path) try: os.makedirs = fake_makedirs self.storage.save('normal/test.file', ContentFile('saved normally')) with self.storage.open('normal/test.file') as f: self.assertEqual(f.read(), b'saved normally') self.storage.save('raced/test.file', ContentFile('saved with race')) with self.storage.open('raced/test.file') as f: self.assertEqual(f.read(), b'saved with race') # Exceptions aside from FileExistsError are raised. with self.assertRaises(PermissionError): self.storage.save('error/test.file', ContentFile('not saved')) finally: os.makedirs = real_makedirs def test_remove_race_handling(self): """ File storage should be robust against file removal race conditions. """ real_remove = os.remove # Monkey-patch os.remove, to simulate a normal call, a raced call, # and an error. 
        def fake_remove(path):
            if path == os.path.join(self.temp_dir, 'normal.file'):
                real_remove(path)
            elif path == os.path.join(self.temp_dir, 'raced.file'):
                real_remove(path)
                raise FileNotFoundError()
            elif path == os.path.join(self.temp_dir, 'error.file'):
                raise PermissionError()
            else:
                self.fail('unexpected argument %r' % path)

        try:
            os.remove = fake_remove

            self.storage.save('normal.file', ContentFile('delete normally'))
            self.storage.delete('normal.file')
            self.assertFalse(self.storage.exists('normal.file'))

            self.storage.save('raced.file', ContentFile('delete with race'))
            self.storage.delete('raced.file')
            self.assertFalse(self.storage.exists('raced.file'))

            # Exceptions aside from FileNotFoundError are raised.
            self.storage.save('error.file', ContentFile('delete with error'))
            with self.assertRaises(PermissionError):
                self.storage.delete('error.file')
        finally:
            os.remove = real_remove

    def test_file_chunks_error(self):
        """
        Test behavior when file.chunks() raises an error.
        """
        f1 = ContentFile('chunks fails')

        def failing_chunks():
            raise OSError

        f1.chunks = failing_chunks
        with self.assertRaises(OSError):
            self.storage.save('error.file', f1)

    def test_delete_no_name(self):
        """
        Calling delete with an empty name should not try to remove the base
        storage directory, but fail loudly (#20660).
        """
        with self.assertRaises(AssertionError):
            self.storage.delete('')

    def test_delete_deletes_directories(self):
        tmp_dir = tempfile.mkdtemp(dir=self.storage.location)
        self.storage.delete(tmp_dir)
        self.assertFalse(os.path.exists(tmp_dir))

    @override_settings(
        MEDIA_ROOT='media_root',
        MEDIA_URL='media_url/',
        FILE_UPLOAD_PERMISSIONS=0o777,
        FILE_UPLOAD_DIRECTORY_PERMISSIONS=0o777,
    )
    def test_setting_changed(self):
        """
        Properties that default to settings values should update when the
        referenced settings change, while explicitly provided values should
        be unchanged.
        """
        storage = self.storage_class(
            location='explicit_location',
            base_url='explicit_base_url/',
            file_permissions_mode=0o666,
            directory_permissions_mode=0o666,
        )
        defaults_storage = self.storage_class()
        settings = {
            'MEDIA_ROOT': 'overridden_media_root',
            'MEDIA_URL': 'overridden_media_url/',
            'FILE_UPLOAD_PERMISSIONS': 0o333,
            'FILE_UPLOAD_DIRECTORY_PERMISSIONS': 0o333,
        }
        with self.settings(**settings):
            self.assertEqual(storage.base_location, 'explicit_location')
            self.assertIn('explicit_location', storage.location)
            self.assertEqual(storage.base_url, 'explicit_base_url/')
            self.assertEqual(storage.file_permissions_mode, 0o666)
            self.assertEqual(storage.directory_permissions_mode, 0o666)
            self.assertEqual(defaults_storage.base_location, settings['MEDIA_ROOT'])
            self.assertIn(settings['MEDIA_ROOT'], defaults_storage.location)
            self.assertEqual(defaults_storage.base_url, settings['MEDIA_URL'])
            self.assertEqual(defaults_storage.file_permissions_mode, settings['FILE_UPLOAD_PERMISSIONS'])
            self.assertEqual(
                defaults_storage.directory_permissions_mode, settings['FILE_UPLOAD_DIRECTORY_PERMISSIONS']
            )


class CustomStorage(FileSystemStorage):
    def get_available_name(self, name, max_length=None):
        """
        Append numbers to duplicate files rather than underscores, like Trac.
""" basename, *ext = name.split('.') number = 2 while self.exists(name): name = '.'.join([basename, str(number)] + ext) number += 1 return name class CustomStorageTests(FileStorageTests): storage_class = CustomStorage def test_custom_get_available_name(self): first = self.storage.save('custom_storage', ContentFile('custom contents')) self.assertEqual(first, 'custom_storage') second = self.storage.save('custom_storage', ContentFile('more contents')) self.assertEqual(second, 'custom_storage.2') self.storage.delete(first) self.storage.delete(second) class OverwritingStorage(FileSystemStorage): """ Overwrite existing files instead of appending a suffix to generate an unused name. """ # Mask out O_EXCL so os.open() doesn't raise OSError if the file exists. OS_OPEN_FLAGS = FileSystemStorage.OS_OPEN_FLAGS & ~os.O_EXCL def get_available_name(self, name, max_length=None): """Override the effort to find an used name.""" return name class OverwritingStorageTests(FileStorageTests): storage_class = OverwritingStorage def test_save_overwrite_behavior(self): """Saving to same file name twice overwrites the first file.""" name = 'test.file' self.assertFalse(self.storage.exists(name)) content_1 = b'content one' content_2 = b'second content' f_1 = ContentFile(content_1) f_2 = ContentFile(content_2) stored_name_1 = self.storage.save(name, f_1) try: self.assertEqual(stored_name_1, name) self.assertTrue(self.storage.exists(name)) self.assertTrue(os.path.exists(os.path.join(self.temp_dir, name))) with self.storage.open(name) as fp: self.assertEqual(fp.read(), content_1) stored_name_2 = self.storage.save(name, f_2) self.assertEqual(stored_name_2, name) self.assertTrue(self.storage.exists(name)) self.assertTrue(os.path.exists(os.path.join(self.temp_dir, name))) with self.storage.open(name) as fp: self.assertEqual(fp.read(), content_2) finally: self.storage.delete(name) class DiscardingFalseContentStorage(FileSystemStorage): def _save(self, name, content): if content: return super()._save(name, content) return '' class DiscardingFalseContentStorageTests(FileStorageTests): storage_class = DiscardingFalseContentStorage def test_custom_storage_discarding_empty_content(self): """ When Storage.save() wraps a file-like object in File, it should include the name argument so that bool(file) evaluates to True (#26495). """ output = StringIO('content') self.storage.save('tests/stringio', output) self.assertTrue(self.storage.exists('tests/stringio')) with self.storage.open('tests/stringio') as f: self.assertEqual(f.read(), b'content') class FileFieldStorageTests(TestCase): def tearDown(self): shutil.rmtree(temp_storage_location) def _storage_max_filename_length(self, storage): """ Query filesystem for maximum filename length (e.g. AUFS has 242). """ dir_to_test = storage.location while not os.path.exists(dir_to_test): dir_to_test = os.path.dirname(dir_to_test) try: return os.pathconf(dir_to_test, 'PC_NAME_MAX') except Exception: return 255 # Should be safe on most backends def test_files(self): self.assertIsInstance(Storage.normal, FileDescriptor) # An object without a file has limited functionality. obj1 = Storage() self.assertEqual(obj1.normal.name, "") with self.assertRaises(ValueError): obj1.normal.size # Saving a file enables full functionality. 
obj1.normal.save("django_test.txt", ContentFile("content")) self.assertEqual(obj1.normal.name, "tests/django_test.txt") self.assertEqual(obj1.normal.size, 7) self.assertEqual(obj1.normal.read(), b"content") obj1.normal.close() # File objects can be assigned to FileField attributes, but shouldn't # get committed until the model it's attached to is saved. obj1.normal = SimpleUploadedFile("assignment.txt", b"content") dirs, files = temp_storage.listdir("tests") self.assertEqual(dirs, []) self.assertNotIn("assignment.txt", files) obj1.save() dirs, files = temp_storage.listdir("tests") self.assertEqual(sorted(files), ["assignment.txt", "django_test.txt"]) # Save another file with the same name. obj2 = Storage() obj2.normal.save("django_test.txt", ContentFile("more content")) obj2_name = obj2.normal.name self.assertRegex(obj2_name, "tests/django_test_%s.txt" % FILE_SUFFIX_REGEX) self.assertEqual(obj2.normal.size, 12) obj2.normal.close() # Deleting an object does not delete the file it uses. obj2.delete() obj2.normal.save("django_test.txt", ContentFile("more content")) self.assertNotEqual(obj2_name, obj2.normal.name) self.assertRegex(obj2.normal.name, "tests/django_test_%s.txt" % FILE_SUFFIX_REGEX) obj2.normal.close() def test_filefield_read(self): # Files can be read in a little at a time, if necessary. obj = Storage.objects.create( normal=SimpleUploadedFile("assignment.txt", b"content")) obj.normal.open() self.assertEqual(obj.normal.read(3), b"con") self.assertEqual(obj.normal.read(), b"tent") self.assertEqual(list(obj.normal.chunks(chunk_size=2)), [b"co", b"nt", b"en", b"t"]) obj.normal.close() def test_filefield_write(self): # Files can be written to. obj = Storage.objects.create(normal=SimpleUploadedFile('rewritten.txt', b'content')) with obj.normal as normal: normal.open('wb') normal.write(b'updated') obj.refresh_from_db() self.assertEqual(obj.normal.read(), b'updated') obj.normal.close() def test_filefield_reopen(self): obj = Storage.objects.create(normal=SimpleUploadedFile('reopen.txt', b'content')) with obj.normal as normal: normal.open() obj.normal.open() obj.normal.file.seek(0) obj.normal.close() def test_duplicate_filename(self): # Multiple files with the same name get _(7 random chars) appended to them. objs = [Storage() for i in range(2)] for o in objs: o.normal.save("multiple_files.txt", ContentFile("Same Content")) try: names = [o.normal.name for o in objs] self.assertEqual(names[0], "tests/multiple_files.txt") self.assertRegex(names[1], "tests/multiple_files_%s.txt" % FILE_SUFFIX_REGEX) finally: for o in objs: o.delete() def test_file_truncation(self): # Given the max_length is limited, when multiple files get uploaded # under the same name, then the filename get truncated in order to fit # in _(7 random chars). When most of the max_length is taken by # dirname + extension and there are not enough characters in the # filename to truncate, an exception should be raised. objs = [Storage() for i in range(2)] filename = 'filename.ext' for o in objs: o.limited_length.save(filename, ContentFile('Same Content')) try: # Testing truncation. names = [o.limited_length.name for o in objs] self.assertEqual(names[0], 'tests/%s' % filename) self.assertRegex(names[1], 'tests/fi_%s.ext' % FILE_SUFFIX_REGEX) # Testing exception is raised when filename is too short to truncate. 
            filename = 'short.longext'
            objs[0].limited_length.save(filename, ContentFile('Same Content'))
            with self.assertRaisesMessage(SuspiciousFileOperation, 'Storage can not find an available filename'):
                objs[1].limited_length.save(filename, ContentFile('Same Content'))
        finally:
            for o in objs:
                o.delete()

    @unittest.skipIf(
        sys.platform.startswith('win'),
        "Windows supports at most 260 characters in a path.",
    )
    def test_extended_length_storage(self):
        # Testing FileField with max_length > 255. Most systems have a
        # filename length limit of 255. The path takes extra chars.
        filename = (self._storage_max_filename_length(temp_storage) - 4) * 'a'  # 4 chars for extension.
        obj = Storage()
        obj.extended_length.save('%s.txt' % filename, ContentFile('Same Content'))
        self.assertEqual(obj.extended_length.name, 'tests/%s.txt' % filename)
        self.assertEqual(obj.extended_length.read(), b'Same Content')
        obj.extended_length.close()

    def test_filefield_default(self):
        # Default values allow an object to access a single file.
        temp_storage.save('tests/default.txt', ContentFile('default content'))
        obj = Storage.objects.create()
        self.assertEqual(obj.default.name, "tests/default.txt")
        self.assertEqual(obj.default.read(), b"default content")
        obj.default.close()

        # But it shouldn't be deleted, even if there are no more objects using
        # it.
        obj.delete()
        obj = Storage()
        self.assertEqual(obj.default.read(), b"default content")
        obj.default.close()

    def test_empty_upload_to(self):
        # upload_to can be empty, meaning it does not use a subdirectory.
        obj = Storage()
        obj.empty.save('django_test.txt', ContentFile('more content'))
        self.assertEqual(obj.empty.name, "django_test.txt")
        self.assertEqual(obj.empty.read(), b"more content")
        obj.empty.close()

    def test_random_upload_to(self):
        # Verify the fix for #5655, making sure the directory is only
        # determined once.
        obj = Storage()
        obj.random.save("random_file", ContentFile("random content"))
        self.assertTrue(obj.random.name.endswith("/random_file"))
        obj.random.close()

    def test_custom_valid_name_callable_upload_to(self):
        """
        Storage.get_valid_name() should be called when upload_to is a callable.
        """
        obj = Storage()
        obj.custom_valid_name.save("random_file", ContentFile("random content"))
        # CustomValidNameStorage.get_valid_name() appends '_valid' to the name
        self.assertTrue(obj.custom_valid_name.name.endswith("/random_file_valid"))
        obj.custom_valid_name.close()

    def test_filefield_pickling(self):
        # Push an object into the cache to make sure it pickles properly
        obj = Storage()
        obj.normal.save("django_test.txt", ContentFile("more content"))
        obj.normal.close()
        cache.set("obj", obj)
        self.assertEqual(cache.get("obj").normal.name, "tests/django_test.txt")

    def test_file_object(self):
        # Create sample file
        temp_storage.save('tests/example.txt', ContentFile('some content'))

        # Load it as a Python file object
        with open(temp_storage.path('tests/example.txt')) as file_obj:
            # Save it using storage and read its content
            temp_storage.save('tests/file_obj', file_obj)
        self.assertTrue(temp_storage.exists('tests/file_obj'))
        with temp_storage.open('tests/file_obj') as f:
            self.assertEqual(f.read(), b'some content')

    def test_stringio(self):
        # Test passing a StringIO instance as the content argument to save
        output = StringIO()
        output.write('content')
        output.seek(0)

        # Save it and read the written file
        temp_storage.save('tests/stringio', output)
        self.assertTrue(temp_storage.exists('tests/stringio'))
        with temp_storage.open('tests/stringio') as f:
            self.assertEqual(f.read(), b'content')


# Tests for a race condition on file saving (#4948).
# This is written in such a way that it'll always pass on platforms # without threading. class SlowFile(ContentFile): def chunks(self): time.sleep(1) return super().chunks() class FileSaveRaceConditionTest(SimpleTestCase): def setUp(self): self.storage_dir = tempfile.mkdtemp() self.storage = FileSystemStorage(self.storage_dir) self.thread = threading.Thread(target=self.save_file, args=['conflict']) def tearDown(self): shutil.rmtree(self.storage_dir) def save_file(self, name): name = self.storage.save(name, SlowFile(b"Data")) def test_race_condition(self): self.thread.start() self.save_file('conflict') self.thread.join() files = sorted(os.listdir(self.storage_dir)) self.assertEqual(files[0], 'conflict') self.assertRegex(files[1], 'conflict_%s' % FILE_SUFFIX_REGEX) @unittest.skipIf(sys.platform.startswith('win'), "Windows only partially supports umasks and chmod.") class FileStoragePermissions(unittest.TestCase): def setUp(self): self.umask = 0o027 self.old_umask = os.umask(self.umask) self.storage_dir = tempfile.mkdtemp() def tearDown(self): shutil.rmtree(self.storage_dir) os.umask(self.old_umask) @override_settings(FILE_UPLOAD_PERMISSIONS=0o654) def test_file_upload_permissions(self): self.storage = FileSystemStorage(self.storage_dir) name = self.storage.save("the_file", ContentFile("data")) actual_mode = os.stat(self.storage.path(name))[0] & 0o777 self.assertEqual(actual_mode, 0o654) @override_settings(FILE_UPLOAD_PERMISSIONS=None) def test_file_upload_default_permissions(self): self.storage = FileSystemStorage(self.storage_dir) fname = self.storage.save("some_file", ContentFile("data")) mode = os.stat(self.storage.path(fname))[0] & 0o777 self.assertEqual(mode, 0o666 & ~self.umask) @override_settings(FILE_UPLOAD_DIRECTORY_PERMISSIONS=0o765) def test_file_upload_directory_permissions(self): self.storage = FileSystemStorage(self.storage_dir) name = self.storage.save("the_directory/the_file", ContentFile("data")) dir_mode = os.stat(os.path.dirname(self.storage.path(name)))[0] & 0o777 self.assertEqual(dir_mode, 0o765) @override_settings(FILE_UPLOAD_DIRECTORY_PERMISSIONS=None) def test_file_upload_directory_default_permissions(self): self.storage = FileSystemStorage(self.storage_dir) name = self.storage.save("the_directory/the_file", ContentFile("data")) dir_mode = os.stat(os.path.dirname(self.storage.path(name)))[0] & 0o777 self.assertEqual(dir_mode, 0o777 & ~self.umask) class FileStoragePathParsing(SimpleTestCase): def setUp(self): self.storage_dir = tempfile.mkdtemp() self.storage = FileSystemStorage(self.storage_dir) def tearDown(self): shutil.rmtree(self.storage_dir) def test_directory_with_dot(self): """Regression test for #9610. If the directory name contains a dot and the file name doesn't, make sure we still mangle the file name instead of the directory name. """ self.storage.save('dotted.path/test', ContentFile("1")) self.storage.save('dotted.path/test', ContentFile("2")) files = sorted(os.listdir(os.path.join(self.storage_dir, 'dotted.path'))) self.assertFalse(os.path.exists(os.path.join(self.storage_dir, 'dotted_.path'))) self.assertEqual(files[0], 'test') self.assertRegex(files[1], 'test_%s' % FILE_SUFFIX_REGEX) def test_first_character_dot(self): """ File names with a dot as their first character don't have an extension, and the underscore should get added to the end. 
""" self.storage.save('dotted.path/.test', ContentFile("1")) self.storage.save('dotted.path/.test', ContentFile("2")) files = sorted(os.listdir(os.path.join(self.storage_dir, 'dotted.path'))) self.assertFalse(os.path.exists(os.path.join(self.storage_dir, 'dotted_.path'))) self.assertEqual(files[0], '.test') self.assertRegex(files[1], '.test_%s' % FILE_SUFFIX_REGEX) class ContentFileStorageTestCase(unittest.TestCase): def setUp(self): self.storage_dir = tempfile.mkdtemp() self.storage = FileSystemStorage(self.storage_dir) def tearDown(self): shutil.rmtree(self.storage_dir) def test_content_saving(self): """ ContentFile can be saved correctly with the filesystem storage, if it was initialized with either bytes or unicode content. """ self.storage.save('bytes.txt', ContentFile(b"content")) self.storage.save('unicode.txt', ContentFile("español")) @override_settings(ROOT_URLCONF='file_storage.urls') class FileLikeObjectTestCase(LiveServerTestCase): """ Test file-like objects (#15644). """ available_apps = [] def setUp(self): self.temp_dir = tempfile.mkdtemp() self.storage = FileSystemStorage(location=self.temp_dir) def tearDown(self): shutil.rmtree(self.temp_dir) def test_urllib_request_urlopen(self): """ Test the File storage API with a file-like object coming from urllib.request.urlopen(). """ file_like_object = urlopen(self.live_server_url + '/') f = File(file_like_object) stored_filename = self.storage.save("remote_file.html", f) remote_file = urlopen(self.live_server_url + '/') with self.storage.open(stored_filename) as stored_file: self.assertEqual(stored_file.read(), remote_file.read())
import uuid from django.core.exceptions import ImproperlyConfigured from django.test import SimpleTestCase from django.test.utils import override_settings from django.urls import Resolver404, path, resolve, reverse from .converters import DynamicConverter from .views import empty_view included_kwargs = {'base': b'hello', 'value': b'world'} converter_test_data = ( # ('url', ('url_name', 'app_name', {kwargs})), # aGVsbG8= is 'hello' encoded in base64. ('/base64/aGVsbG8=/', ('base64', '', {'value': b'hello'})), ('/base64/aGVsbG8=/subpatterns/d29ybGQ=/', ('subpattern-base64', '', included_kwargs)), ('/base64/aGVsbG8=/namespaced/d29ybGQ=/', ('subpattern-base64', 'namespaced-base64', included_kwargs)), ) @override_settings(ROOT_URLCONF='urlpatterns.path_urls') class SimplifiedURLTests(SimpleTestCase): def test_path_lookup_without_parameters(self): match = resolve('/articles/2003/') self.assertEqual(match.url_name, 'articles-2003') self.assertEqual(match.args, ()) self.assertEqual(match.kwargs, {}) self.assertEqual(match.route, 'articles/2003/') def test_path_lookup_with_typed_parameters(self): match = resolve('/articles/2015/') self.assertEqual(match.url_name, 'articles-year') self.assertEqual(match.args, ()) self.assertEqual(match.kwargs, {'year': 2015}) self.assertEqual(match.route, 'articles/<int:year>/') def test_path_lookup_with_multiple_parameters(self): match = resolve('/articles/2015/04/12/') self.assertEqual(match.url_name, 'articles-year-month-day') self.assertEqual(match.args, ()) self.assertEqual(match.kwargs, {'year': 2015, 'month': 4, 'day': 12}) self.assertEqual(match.route, 'articles/<int:year>/<int:month>/<int:day>/') def test_two_variable_at_start_of_path_pattern(self): match = resolve('/en/foo/') self.assertEqual(match.url_name, 'lang-and-path') self.assertEqual(match.kwargs, {'lang': 'en', 'url': 'foo'}) self.assertEqual(match.route, '<lang>/<path:url>/') def test_re_path(self): match = resolve('/regex/1/') self.assertEqual(match.url_name, 'regex') self.assertEqual(match.kwargs, {'pk': '1'}) self.assertEqual(match.route, '^regex/(?P<pk>[0-9]+)/$') def test_re_path_with_optional_parameter(self): for url, kwargs in ( ('/regex_optional/1/2/', {'arg1': '1', 'arg2': '2'}), ('/regex_optional/1/', {'arg1': '1'}), ): with self.subTest(url=url): match = resolve(url) self.assertEqual(match.url_name, 'regex_optional') self.assertEqual(match.kwargs, kwargs) self.assertEqual( match.route, r'^regex_optional/(?P<arg1>\d+)/(?:(?P<arg2>\d+)/)?', ) def test_path_lookup_with_inclusion(self): match = resolve('/included_urls/extra/something/') self.assertEqual(match.url_name, 'inner-extra') self.assertEqual(match.route, 'included_urls/extra/<extra>/') def test_path_lookup_with_empty_string_inclusion(self): match = resolve('/more/99/') self.assertEqual(match.url_name, 'inner-more') self.assertEqual(match.route, r'^more/(?P<extra>\w+)/$') def test_path_lookup_with_double_inclusion(self): match = resolve('/included_urls/more/some_value/') self.assertEqual(match.url_name, 'inner-more') self.assertEqual(match.route, r'included_urls/more/(?P<extra>\w+)/$') def test_path_reverse_without_parameter(self): url = reverse('articles-2003') self.assertEqual(url, '/articles/2003/') def test_path_reverse_with_parameter(self): url = reverse('articles-year-month-day', kwargs={'year': 2015, 'month': 4, 'day': 12}) self.assertEqual(url, '/articles/2015/4/12/') @override_settings(ROOT_URLCONF='urlpatterns.path_base64_urls') def test_converter_resolve(self): for url, (url_name, app_name, kwargs) in 
converter_test_data: with self.subTest(url=url): match = resolve(url) self.assertEqual(match.url_name, url_name) self.assertEqual(match.app_name, app_name) self.assertEqual(match.kwargs, kwargs) @override_settings(ROOT_URLCONF='urlpatterns.path_base64_urls') def test_converter_reverse(self): for expected, (url_name, app_name, kwargs) in converter_test_data: if app_name: url_name = '%s:%s' % (app_name, url_name) with self.subTest(url=url_name): url = reverse(url_name, kwargs=kwargs) self.assertEqual(url, expected) @override_settings(ROOT_URLCONF='urlpatterns.path_base64_urls') def test_converter_reverse_with_second_layer_instance_namespace(self): kwargs = included_kwargs.copy() kwargs['last_value'] = b'world' url = reverse('instance-ns-base64:subsubpattern-base64', kwargs=kwargs) self.assertEqual(url, '/base64/aGVsbG8=/subpatterns/d29ybGQ=/d29ybGQ=/') def test_path_inclusion_is_matchable(self): match = resolve('/included_urls/extra/something/') self.assertEqual(match.url_name, 'inner-extra') self.assertEqual(match.kwargs, {'extra': 'something'}) def test_path_inclusion_is_reversible(self): url = reverse('inner-extra', kwargs={'extra': 'something'}) self.assertEqual(url, '/included_urls/extra/something/') def test_invalid_converter(self): msg = "URL route 'foo/<nonexistent:var>/' uses invalid converter 'nonexistent'." with self.assertRaisesMessage(ImproperlyConfigured, msg): path('foo/<nonexistent:var>/', empty_view) @override_settings(ROOT_URLCONF='urlpatterns.converter_urls') class ConverterTests(SimpleTestCase): def test_matching_urls(self): def no_converter(x): return x test_data = ( ('int', {'0', '1', '01', 1234567890}, int), ('str', {'abcxyz'}, no_converter), ('path', {'allows.ANY*characters'}, no_converter), ('slug', {'abcxyz-ABCXYZ_01234567890'}, no_converter), ('uuid', {'39da9369-838e-4750-91a5-f7805cd82839'}, uuid.UUID), ) for url_name, url_suffixes, converter in test_data: for url_suffix in url_suffixes: url = '/%s/%s/' % (url_name, url_suffix) with self.subTest(url=url): match = resolve(url) self.assertEqual(match.url_name, url_name) self.assertEqual(match.kwargs, {url_name: converter(url_suffix)}) # reverse() works with string parameters. string_kwargs = {url_name: url_suffix} self.assertEqual(reverse(url_name, kwargs=string_kwargs), url) # reverse() also works with native types (int, UUID, etc.). if converter is not no_converter: # The converted value might be different for int (a # leading zero is lost in the conversion). converted_value = match.kwargs[url_name] converted_url = '/%s/%s/' % (url_name, converted_value) self.assertEqual(reverse(url_name, kwargs={url_name: converted_value}), converted_url) def test_nonmatching_urls(self): test_data = ( ('int', {'-1', 'letters'}), ('str', {'', '/'}), ('path', {''}), ('slug', {'', 'stars*notallowed'}), ('uuid', { '', '9da9369-838e-4750-91a5-f7805cd82839', '39da9369-838-4750-91a5-f7805cd82839', '39da9369-838e-475-91a5-f7805cd82839', '39da9369-838e-4750-91a-f7805cd82839', '39da9369-838e-4750-91a5-f7805cd8283', }), ) for url_name, url_suffixes in test_data: for url_suffix in url_suffixes: url = '/%s/%s/' % (url_name, url_suffix) with self.subTest(url=url), self.assertRaises(Resolver404): resolve(url) class ParameterRestrictionTests(SimpleTestCase): def test_non_identifier_parameter_name_causes_exception(self): msg = ( "URL route 'hello/<int:1>/' uses parameter name '1' which isn't " "a valid Python identifier." 
) with self.assertRaisesMessage(ImproperlyConfigured, msg): path(r'hello/<int:1>/', lambda r: None) def test_allows_non_ascii_but_valid_identifiers(self): # \u0394 is "GREEK CAPITAL LETTER DELTA", a valid identifier. p = path('hello/<str:\u0394>/', lambda r: None) match = p.resolve('hello/1/') self.assertEqual(match.kwargs, {'\u0394': '1'}) @override_settings(ROOT_URLCONF='urlpatterns.path_dynamic_urls') class ConversionExceptionTests(SimpleTestCase): """How are errors in Converter.to_python() and to_url() handled?""" def test_resolve_value_error_means_no_match(self): @DynamicConverter.register_to_python def raises_value_error(value): raise ValueError() with self.assertRaises(Resolver404): resolve('/dynamic/abc/') def test_resolve_type_error_propagates(self): @DynamicConverter.register_to_python def raises_type_error(value): raise TypeError('This type error propagates.') with self.assertRaisesMessage(TypeError, 'This type error propagates.'): resolve('/dynamic/abc/') def test_reverse_value_error_propagates(self): @DynamicConverter.register_to_url def raises_value_error(value): raise ValueError('This value error propagates.') with self.assertRaisesMessage(ValueError, 'This value error propagates.'): reverse('dynamic', kwargs={'value': object()})
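# ---------------------------------------------------------------------------
# Illustrative sketch (editor's addition, not part of the original suite):
# the tests above exercise a custom "base64" converter imported from
# .converters. A minimal converter along those lines could look like the
# class below; the class name and regex are assumptions, not the suite's
# actual .converters module.
import base64

from django.urls import register_converter


class SketchBase64Converter:
    # Characters legal in standard base64, plus '=' padding.
    regex = r'[A-Za-z0-9+/=]*'

    def to_python(self, value):
        # A ValueError raised here means "no match", which is the
        # behavior ConversionExceptionTests verifies for to_python().
        return base64.b64decode(value)

    def to_url(self, value):
        # The tests reverse() with bytes kwargs (e.g. b'hello').
        return base64.b64encode(value).decode('ascii')


# Registration would normally live in a urls module; left commented here so
# this sketch doesn't shadow the converter the suite actually registers:
# register_converter(SketchBase64Converter, 'base64')
# path('base64/<base64:value>/', empty_view, name='base64')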
d9cfdff778d8d2b346e917e2e699245d691ee2ce51b8b4f56e0aa6de1d423e7d
import json from django.contrib import admin from django.contrib.admin.tests import AdminSeleniumTestCase from django.contrib.admin.views.autocomplete import AutocompleteJsonView from django.contrib.auth.models import Permission, User from django.contrib.contenttypes.models import ContentType from django.http import Http404 from django.test import RequestFactory, override_settings from django.urls import reverse, reverse_lazy from .admin import AnswerAdmin, QuestionAdmin from .models import Answer, Author, Authorship, Book, Question from .tests import AdminViewBasicTestCase PAGINATOR_SIZE = AutocompleteJsonView.paginate_by class AuthorAdmin(admin.ModelAdmin): ordering = ['id'] search_fields = ['id'] class AuthorshipInline(admin.TabularInline): model = Authorship autocomplete_fields = ['author'] class BookAdmin(admin.ModelAdmin): inlines = [AuthorshipInline] site = admin.AdminSite(name='autocomplete_admin') site.register(Question, QuestionAdmin) site.register(Answer, AnswerAdmin) site.register(Author, AuthorAdmin) site.register(Book, BookAdmin) class AutocompleteJsonViewTests(AdminViewBasicTestCase): as_view_args = {'model_admin': QuestionAdmin(Question, site)} factory = RequestFactory() url = reverse_lazy('autocomplete_admin:admin_views_question_autocomplete') @classmethod def setUpTestData(cls): cls.user = User.objects.create_user( username='user', password='secret', email='[email protected]', is_staff=True, ) super().setUpTestData() def test_success(self): q = Question.objects.create(question='Is this a question?') request = self.factory.get(self.url, {'term': 'is'}) request.user = self.superuser response = AutocompleteJsonView.as_view(**self.as_view_args)(request) self.assertEqual(response.status_code, 200) data = json.loads(response.content.decode('utf-8')) self.assertEqual(data, { 'results': [{'id': str(q.pk), 'text': q.question}], 'pagination': {'more': False}, }) def test_must_be_logged_in(self): response = self.client.get(self.url, {'term': ''}) self.assertEqual(response.status_code, 200) self.client.logout() response = self.client.get(self.url, {'term': ''}) self.assertEqual(response.status_code, 302) def test_has_view_or_change_permission_required(self): """ Users require the view or change permission for the related model to use the autocomplete view for it. """ request = self.factory.get(self.url, {'term': 'is'}) self.user.is_staff = True self.user.save() request.user = self.user response = AutocompleteJsonView.as_view(**self.as_view_args)(request) self.assertEqual(response.status_code, 403) self.assertJSONEqual(response.content.decode('utf-8'), {'error': '403 Forbidden'}) for permission in ('view', 'change'): with self.subTest(permission=permission): self.user.user_permissions.clear() p = Permission.objects.get( content_type=ContentType.objects.get_for_model(Question), codename='%s_question' % permission, ) self.user.user_permissions.add(p) request.user = User.objects.get(pk=self.user.pk) response = AutocompleteJsonView.as_view(**self.as_view_args)(request) self.assertEqual(response.status_code, 200) def test_search_use_distinct(self): """ Searching across model relations uses QuerySet.distinct() to avoid duplicates.
""" q1 = Question.objects.create(question='question 1') q2 = Question.objects.create(question='question 2') q2.related_questions.add(q1) q3 = Question.objects.create(question='question 3') q3.related_questions.add(q1) request = self.factory.get(self.url, {'term': 'question'}) request.user = self.superuser class DistinctQuestionAdmin(QuestionAdmin): search_fields = ['related_questions__question', 'question'] model_admin = DistinctQuestionAdmin(Question, site) response = AutocompleteJsonView.as_view(model_admin=model_admin)(request) self.assertEqual(response.status_code, 200) data = json.loads(response.content.decode('utf-8')) self.assertEqual(len(data['results']), 3) def test_missing_search_fields(self): class EmptySearchAdmin(QuestionAdmin): search_fields = [] model_admin = EmptySearchAdmin(Question, site) msg = 'EmptySearchAdmin must have search_fields for the autocomplete_view.' with self.assertRaisesMessage(Http404, msg): model_admin.autocomplete_view(self.factory.get(self.url)) def test_get_paginator(self): """Search results are paginated.""" Question.objects.bulk_create(Question(question=str(i)) for i in range(PAGINATOR_SIZE + 10)) model_admin = QuestionAdmin(Question, site) model_admin.ordering = ['pk'] # The first page of results. request = self.factory.get(self.url, {'term': ''}) request.user = self.superuser response = AutocompleteJsonView.as_view(model_admin=model_admin)(request) self.assertEqual(response.status_code, 200) data = json.loads(response.content.decode('utf-8')) self.assertEqual(data, { 'results': [{'id': str(q.pk), 'text': q.question} for q in Question.objects.all()[:PAGINATOR_SIZE]], 'pagination': {'more': True}, }) # The second page of results. request = self.factory.get(self.url, {'term': '', 'page': '2'}) request.user = self.superuser response = AutocompleteJsonView.as_view(model_admin=model_admin)(request) self.assertEqual(response.status_code, 200) data = json.loads(response.content.decode('utf-8')) self.assertEqual(data, { 'results': [{'id': str(q.pk), 'text': q.question} for q in Question.objects.all()[PAGINATOR_SIZE:]], 'pagination': {'more': False}, }) @override_settings(ROOT_URLCONF='admin_views.urls') class SeleniumTests(AdminSeleniumTestCase): available_apps = ['admin_views'] + AdminSeleniumTestCase.available_apps def setUp(self): self.superuser = User.objects.create_superuser( username='super', password='secret', email='[email protected]', ) self.admin_login(username='super', password='secret', login_url=reverse('autocomplete_admin:index')) def test_select(self): from selenium.webdriver.common.keys import Keys from selenium.webdriver.support.ui import Select self.selenium.get(self.live_server_url + reverse('autocomplete_admin:admin_views_answer_add')) elem = self.selenium.find_element_by_css_selector('.select2-selection') elem.click() # Open the autocomplete dropdown. results = self.selenium.find_element_by_css_selector('.select2-results') self.assertTrue(results.is_displayed()) option = self.selenium.find_element_by_css_selector('.select2-results__option') self.assertEqual(option.text, 'No results found') elem.click() # Close the autocomplete dropdown. q1 = Question.objects.create(question='Who am I?') Question.objects.bulk_create(Question(question=str(i)) for i in range(PAGINATOR_SIZE + 10)) elem.click() # Reopen the dropdown now that some objects exist. 
result_container = self.selenium.find_element_by_css_selector('.select2-results') self.assertTrue(result_container.is_displayed()) results = result_container.find_elements_by_css_selector('.select2-results__option') # PAGINATOR_SIZE results and "Loading more results". self.assertEqual(len(results), PAGINATOR_SIZE + 1) search = self.selenium.find_element_by_css_selector('.select2-search__field') # Load next page of results by scrolling to the bottom of the list. for _ in range(len(results)): search.send_keys(Keys.ARROW_DOWN) results = result_container.find_elements_by_css_selector('.select2-results__option') # All objects and "Loading more results". self.assertEqual(len(results), PAGINATOR_SIZE + 11) # Limit the results with the search field. search.send_keys('Who') self.assertTrue(result_container.is_displayed()) results = result_container.find_elements_by_css_selector('.select2-results__option') self.assertEqual(len(results), 1) # Select the result. search.send_keys(Keys.RETURN) select = Select(self.selenium.find_element_by_id('id_question')) self.assertEqual(select.first_selected_option.get_attribute('value'), str(q1.pk)) def test_select_multiple(self): from selenium.webdriver.common.keys import Keys from selenium.webdriver.support.ui import Select self.selenium.get(self.live_server_url + reverse('autocomplete_admin:admin_views_question_add')) elem = self.selenium.find_element_by_css_selector('.select2-selection') elem.click() # Open the autocomplete dropdown. results = self.selenium.find_element_by_css_selector('.select2-results') self.assertTrue(results.is_displayed()) option = self.selenium.find_element_by_css_selector('.select2-results__option') self.assertEqual(option.text, 'No results found') elem.click() # Close the autocomplete dropdown. Question.objects.create(question='Who am I?') Question.objects.bulk_create(Question(question=str(i)) for i in range(PAGINATOR_SIZE + 10)) elem.click() # Reopen the dropdown now that some objects exist. result_container = self.selenium.find_element_by_css_selector('.select2-results') self.assertTrue(result_container.is_displayed()) results = result_container.find_elements_by_css_selector('.select2-results__option') self.assertEqual(len(results), PAGINATOR_SIZE + 1) search = self.selenium.find_element_by_css_selector('.select2-search__field') # Load next page of results by scrolling to the bottom of the list. for _ in range(len(results)): search.send_keys(Keys.ARROW_DOWN) results = result_container.find_elements_by_css_selector('.select2-results__option') # All objects and "Loading more results". self.assertEqual(len(results), PAGINATOR_SIZE + 11) # Limit the results with the search field. search.send_keys('Who') self.assertTrue(result_container.is_displayed()) results = result_container.find_elements_by_css_selector('.select2-results__option') self.assertEqual(len(results), 1) # Select the result. search.send_keys(Keys.RETURN) # Reopen the dropdown and add the first result to the selection. elem.click() search.send_keys(Keys.ARROW_DOWN) search.send_keys(Keys.RETURN) select = Select(self.selenium.find_element_by_id('id_related_questions')) self.assertEqual(len(select.all_selected_options), 2) def test_inline_add_another_widgets(self): def assertNoResults(row): elem = row.find_element_by_css_selector('.select2-selection') elem.click() # Open the autocomplete dropdown.
results = self.selenium.find_element_by_css_selector('.select2-results') self.assertTrue(results.is_displayed()) option = self.selenium.find_element_by_css_selector('.select2-results__option') self.assertEqual(option.text, 'No results found') # Autocomplete works in rows present when the page loads. self.selenium.get(self.live_server_url + reverse('autocomplete_admin:admin_views_book_add')) rows = self.selenium.find_elements_by_css_selector('.dynamic-authorship_set') self.assertEqual(len(rows), 3) assertNoResults(rows[0]) # Autocomplete works in rows added using the "Add another" button. self.selenium.find_element_by_link_text('Add another Authorship').click() rows = self.selenium.find_elements_by_css_selector('.dynamic-authorship_set') self.assertEqual(len(rows), 4) assertNoResults(rows[-1])
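# ---------------------------------------------------------------------------
# Illustrative sketch (editor's addition, not part of the original suite):
# the plumbing exercised above boils down to two ModelAdmin options.
# search_fields on the related model's admin is required (its absence is
# what makes test_missing_search_fields raise Http404), and
# autocomplete_fields on the referencing admin swaps the FK/M2M widget for
# the select2 control backed by AutocompleteJsonView. The admin class names
# below are assumptions for illustration only.
from django.contrib import admin


class SketchQuestionAdmin(admin.ModelAdmin):
    # Required: the JSON endpoint refuses to guess what to search on.
    search_fields = ['question']


class SketchAnswerAdmin(admin.ModelAdmin):
    # Renders Answer.question as an autocomplete widget whose queries are
    # answered by the related model admin's search_fields.
    autocomplete_fields = ['question']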
e328a826fb7d308b2721dc78b6cc226d0f6127d229f4669503105944bffce797
import datetime import pickle from decimal import Decimal from operator import attrgetter from unittest import mock from django.contrib.contenttypes.models import ContentType from django.core.exceptions import FieldError from django.db import connection from django.db.models import ( Avg, Case, Count, DecimalField, F, IntegerField, Max, Q, StdDev, Sum, Value, Variance, When, ) from django.db.models.aggregates import Aggregate from django.test import ( TestCase, ignore_warnings, skipUnlessAnyDBFeature, skipUnlessDBFeature, ) from django.test.utils import Approximate from django.utils.deprecation import RemovedInDjango31Warning from .models import ( Alfa, Author, Book, Bravo, Charlie, Clues, Entries, HardbackBook, ItemTag, Publisher, SelfRefFK, Store, WithManualPK, ) class AggregationTests(TestCase): @classmethod def setUpTestData(cls): cls.a1 = Author.objects.create(name='Adrian Holovaty', age=34) cls.a2 = Author.objects.create(name='Jacob Kaplan-Moss', age=35) cls.a3 = Author.objects.create(name='Brad Dayley', age=45) cls.a4 = Author.objects.create(name='James Bennett', age=29) cls.a5 = Author.objects.create(name='Jeffrey Forcier', age=37) cls.a6 = Author.objects.create(name='Paul Bissex', age=29) cls.a7 = Author.objects.create(name='Wesley J. Chun', age=25) cls.a8 = Author.objects.create(name='Peter Norvig', age=57) cls.a9 = Author.objects.create(name='Stuart Russell', age=46) cls.a1.friends.add(cls.a2, cls.a4) cls.a2.friends.add(cls.a1, cls.a7) cls.a4.friends.add(cls.a1) cls.a5.friends.add(cls.a6, cls.a7) cls.a6.friends.add(cls.a5, cls.a7) cls.a7.friends.add(cls.a2, cls.a5, cls.a6) cls.a8.friends.add(cls.a9) cls.a9.friends.add(cls.a8) cls.p1 = Publisher.objects.create(name='Apress', num_awards=3) cls.p2 = Publisher.objects.create(name='Sams', num_awards=1) cls.p3 = Publisher.objects.create(name='Prentice Hall', num_awards=7) cls.p4 = Publisher.objects.create(name='Morgan Kaufmann', num_awards=9) cls.p5 = Publisher.objects.create(name="Jonno's House of Books", num_awards=0) cls.b1 = Book.objects.create( isbn='159059725', name='The Definitive Guide to Django: Web Development Done Right', pages=447, rating=4.5, price=Decimal('30.00'), contact=cls.a1, publisher=cls.p1, pubdate=datetime.date(2007, 12, 6) ) cls.b2 = Book.objects.create( isbn='067232959', name='Sams Teach Yourself Django in 24 Hours', pages=528, rating=3.0, price=Decimal('23.09'), contact=cls.a3, publisher=cls.p2, pubdate=datetime.date(2008, 3, 3) ) cls.b3 = Book.objects.create( isbn='159059996', name='Practical Django Projects', pages=300, rating=4.0, price=Decimal('29.69'), contact=cls.a4, publisher=cls.p1, pubdate=datetime.date(2008, 6, 23) ) cls.b4 = Book.objects.create( isbn='013235613', name='Python Web Development with Django', pages=350, rating=4.0, price=Decimal('29.69'), contact=cls.a5, publisher=cls.p3, pubdate=datetime.date(2008, 11, 3) ) cls.b5 = HardbackBook.objects.create( isbn='013790395', name='Artificial Intelligence: A Modern Approach', pages=1132, rating=4.0, price=Decimal('82.80'), contact=cls.a8, publisher=cls.p3, pubdate=datetime.date(1995, 1, 15), weight=4.5) cls.b6 = HardbackBook.objects.create( isbn='155860191', name='Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp', pages=946, rating=5.0, price=Decimal('75.00'), contact=cls.a8, publisher=cls.p4, pubdate=datetime.date(1991, 10, 15), weight=3.7) cls.b1.authors.add(cls.a1, cls.a2) cls.b2.authors.add(cls.a3) cls.b3.authors.add(cls.a4) cls.b4.authors.add(cls.a5, cls.a6, cls.a7) cls.b5.authors.add(cls.a8, cls.a9) 
cls.b6.authors.add(cls.a8) s1 = Store.objects.create( name='Amazon.com', original_opening=datetime.datetime(1994, 4, 23, 9, 17, 42), friday_night_closing=datetime.time(23, 59, 59) ) s2 = Store.objects.create( name='Books.com', original_opening=datetime.datetime(2001, 3, 15, 11, 23, 37), friday_night_closing=datetime.time(23, 59, 59) ) s3 = Store.objects.create( name="Mamma and Pappa's Books", original_opening=datetime.datetime(1945, 4, 25, 16, 24, 14), friday_night_closing=datetime.time(21, 30) ) s1.books.add(cls.b1, cls.b2, cls.b3, cls.b4, cls.b5, cls.b6) s2.books.add(cls.b1, cls.b3, cls.b5, cls.b6) s3.books.add(cls.b3, cls.b4, cls.b6) def assertObjectAttrs(self, obj, **kwargs): for attr, value in kwargs.items(): self.assertEqual(getattr(obj, attr), value) @ignore_warnings(category=RemovedInDjango31Warning) def test_annotation_with_value(self): values = Book.objects.filter( name='Practical Django Projects', ).annotate( discount_price=F('price') * 2, ).values( 'discount_price', ).annotate(sum_discount=Sum('discount_price')) self.assertSequenceEqual( values, [{'discount_price': Decimal('59.38'), 'sum_discount': Decimal('59.38')}] ) def test_aggregates_in_where_clause(self): """ Regression test for #12822: DatabaseError: aggregates not allowed in WHERE clause The subselect works and returns results equivalent to a query with the IDs listed. Before the corresponding fix for this bug, this test passed in 1.1 and failed in 1.2-beta (trunk). """ qs = Book.objects.values('contact').annotate(Max('id')) qs = qs.order_by('contact').values_list('id__max', flat=True) # don't do anything with the queryset (qs) before including it as a # subquery books = Book.objects.order_by('id') qs1 = books.filter(id__in=qs) qs2 = books.filter(id__in=list(qs)) self.assertEqual(list(qs1), list(qs2)) def test_aggregates_in_where_clause_pre_eval(self): """ Regression test for #12822: DatabaseError: aggregates not allowed in WHERE clause Same as the above test, but evaluates the queryset for the subquery before it's used as a subquery. Before the corresponding fix for this bug, this test failed in both 1.1 and 1.2-beta (trunk). """ qs = Book.objects.values('contact').annotate(Max('id')) qs = qs.order_by('contact').values_list('id__max', flat=True) # force the queryset (qs) for the subquery to be evaluated in its # current state list(qs) books = Book.objects.order_by('id') qs1 = books.filter(id__in=qs) qs2 = books.filter(id__in=list(qs)) self.assertEqual(list(qs1), list(qs2)) @skipUnlessDBFeature('supports_subqueries_in_group_by') def test_annotate_with_extra(self): """ Regression test for #11916: Extra params + aggregation creates incorrect SQL. 
""" # Oracle doesn't support subqueries in group by clause shortest_book_sql = """ SELECT name FROM aggregation_regress_book b WHERE b.publisher_id = aggregation_regress_publisher.id ORDER BY b.pages LIMIT 1 """ # tests that this query does not raise a DatabaseError due to the full # subselect being (erroneously) added to the GROUP BY parameters qs = Publisher.objects.extra(select={ 'name_of_shortest_book': shortest_book_sql, }).annotate(total_books=Count('book')) # force execution of the query list(qs) def test_aggregate(self): # Ordering requests are ignored self.assertEqual( Author.objects.order_by("name").aggregate(Avg("age")), {"age__avg": Approximate(37.444, places=1)} ) # Implicit ordering is also ignored self.assertEqual( Book.objects.aggregate(Sum("pages")), {"pages__sum": 3703}, ) # Baseline results self.assertEqual( Book.objects.aggregate(Sum('pages'), Avg('pages')), {'pages__sum': 3703, 'pages__avg': Approximate(617.166, places=2)} ) # Empty values query doesn't affect grouping or results self.assertEqual( Book.objects.values().aggregate(Sum('pages'), Avg('pages')), {'pages__sum': 3703, 'pages__avg': Approximate(617.166, places=2)} ) # Aggregate overrides extra selected column self.assertEqual( Book.objects.extra(select={'price_per_page': 'price / pages'}).aggregate(Sum('pages')), {'pages__sum': 3703} ) @ignore_warnings(category=RemovedInDjango31Warning) def test_annotation(self): # Annotations get combined with extra select clauses obj = Book.objects.annotate(mean_auth_age=Avg("authors__age")).extra( select={"manufacture_cost": "price * .5"}).get(pk=self.b2.pk) self.assertObjectAttrs( obj, contact_id=self.a3.id, isbn='067232959', mean_auth_age=45.0, name='Sams Teach Yourself Django in 24 Hours', pages=528, price=Decimal("23.09"), pubdate=datetime.date(2008, 3, 3), publisher_id=self.p2.id, rating=3.0 ) # Different DB backends return different types for the extra select computation self.assertIn(obj.manufacture_cost, (11.545, Decimal('11.545'))) # Order of the annotate/extra in the query doesn't matter obj = Book.objects.extra(select={'manufacture_cost': 'price * .5'}).annotate( mean_auth_age=Avg('authors__age')).get(pk=self.b2.pk) self.assertObjectAttrs( obj, contact_id=self.a3.id, isbn='067232959', mean_auth_age=45.0, name='Sams Teach Yourself Django in 24 Hours', pages=528, price=Decimal("23.09"), pubdate=datetime.date(2008, 3, 3), publisher_id=self.p2.id, rating=3.0 ) # Different DB backends return different types for the extra select computation self.assertIn(obj.manufacture_cost, (11.545, Decimal('11.545'))) # Values queries can be combined with annotate and extra obj = Book.objects.annotate(mean_auth_age=Avg('authors__age')).extra( select={'manufacture_cost': 'price * .5'}).values().get(pk=self.b2.pk) manufacture_cost = obj['manufacture_cost'] self.assertIn(manufacture_cost, (11.545, Decimal('11.545'))) del obj['manufacture_cost'] self.assertEqual(obj, { 'id': self.b2.id, 'contact_id': self.a3.id, 'isbn': '067232959', 'mean_auth_age': 45.0, 'name': 'Sams Teach Yourself Django in 24 Hours', 'pages': 528, 'price': Decimal('23.09'), 'pubdate': datetime.date(2008, 3, 3), 'publisher_id': self.p2.id, 'rating': 3.0, }) # The order of the (empty) values, annotate and extra clauses doesn't # matter obj = Book.objects.values().annotate(mean_auth_age=Avg('authors__age')).extra( select={'manufacture_cost': 'price * .5'}).get(pk=self.b2.pk) manufacture_cost = obj['manufacture_cost'] self.assertIn(manufacture_cost, (11.545, Decimal('11.545'))) del obj['manufacture_cost'] 
self.assertEqual(obj, { 'id': self.b2.id, 'contact_id': self.a3.id, 'isbn': '067232959', 'mean_auth_age': 45.0, 'name': 'Sams Teach Yourself Django in 24 Hours', 'pages': 528, 'price': Decimal('23.09'), 'pubdate': datetime.date(2008, 3, 3), 'publisher_id': self.p2.id, 'rating': 3.0 }) # If the annotation precedes the values clause, it won't be included # unless it is explicitly named obj = Book.objects.annotate(mean_auth_age=Avg('authors__age')).extra( select={'price_per_page': 'price / pages'}).values('name').get(pk=self.b1.pk) self.assertEqual(obj, { "name": 'The Definitive Guide to Django: Web Development Done Right', }) obj = Book.objects.annotate(mean_auth_age=Avg('authors__age')).extra( select={'price_per_page': 'price / pages'}).values('name', 'mean_auth_age').get(pk=self.b1.pk) self.assertEqual(obj, { 'mean_auth_age': 34.5, 'name': 'The Definitive Guide to Django: Web Development Done Right', }) # If an annotation isn't included in the values, it can still be used # in a filter with ignore_warnings(category=RemovedInDjango31Warning): qs = Book.objects.annotate(n_authors=Count('authors')).values('name').filter(n_authors__gt=2) self.assertSequenceEqual( qs, [ {"name": 'Python Web Development with Django'} ], ) # The annotations are added to values output if values() precedes # annotate() obj = Book.objects.values('name').annotate(mean_auth_age=Avg('authors__age')).extra( select={'price_per_page': 'price / pages'}).get(pk=self.b1.pk) self.assertEqual(obj, { 'mean_auth_age': 34.5, 'name': 'The Definitive Guide to Django: Web Development Done Right', }) # All of the objects are counted (allow_nulls) and values() respects # the number of objects self.assertEqual( len(Author.objects.annotate(Avg('friends__age')).values()), 9 ) # Consecutive calls to annotate accumulate in the query qs = ( Book.objects .values('price') .annotate(oldest=Max('authors__age')) .order_by('oldest', 'price') .annotate(Max('publisher__num_awards')) ) self.assertSequenceEqual( qs, [ {'price': Decimal("30"), 'oldest': 35, 'publisher__num_awards__max': 3}, {'price': Decimal("29.69"), 'oldest': 37, 'publisher__num_awards__max': 7}, {'price': Decimal("23.09"), 'oldest': 45, 'publisher__num_awards__max': 1}, {'price': Decimal("75"), 'oldest': 57, 'publisher__num_awards__max': 9}, {'price': Decimal("82.8"), 'oldest': 57, 'publisher__num_awards__max': 7} ], ) def test_aggregate_annotation(self): # Aggregates can be composed over annotations. # The return type is derived from the composed aggregate vals = ( Book.objects .all() .annotate(num_authors=Count('authors__id')) .aggregate(Max('pages'), Max('price'), Sum('num_authors'), Avg('num_authors')) ) self.assertEqual(vals, { 'num_authors__sum': 10, 'num_authors__avg': Approximate(1.666, places=2), 'pages__max': 1132, 'price__max': Decimal("82.80") }) # Regression for #15624 - Missing SELECT columns when using values, annotate # and aggregate in a single query self.assertEqual( Book.objects.annotate(c=Count('authors')).values('c').aggregate(Max('c')), {'c__max': 3} ) def test_conditional_aggregate(self): # Conditional aggregation of a grouped queryset.
self.assertEqual( Book.objects.annotate(c=Count('authors')).values('pk').aggregate(test=Sum( Case(When(c__gt=1, then=1), output_field=IntegerField()) ))['test'], 3 ) def test_sliced_conditional_aggregate(self): self.assertEqual( Author.objects.all()[:5].aggregate(test=Sum(Case( When(age__lte=35, then=1), output_field=IntegerField() )))['test'], 3 ) def test_annotated_conditional_aggregate(self): annotated_qs = Book.objects.annotate(discount_price=F('price') * 0.75) self.assertAlmostEqual( annotated_qs.aggregate(test=Avg(Case( When(pages__lt=400, then='discount_price'), output_field=DecimalField() )))['test'], Decimal('22.27'), places=2 ) def test_distinct_conditional_aggregate(self): self.assertEqual( Book.objects.distinct().aggregate(test=Avg(Case( When(price=Decimal('29.69'), then='pages'), output_field=IntegerField() )))['test'], 325 ) def test_conditional_aggregate_on_complex_condition(self): self.assertEqual( Book.objects.distinct().aggregate(test=Avg(Case( When(Q(price__gte=Decimal('29')) & Q(price__lt=Decimal('30')), then='pages'), output_field=IntegerField() )))['test'], 325 ) def test_decimal_aggregate_annotation_filter(self): """ Filtering on an aggregate annotation with Decimal values should work. Requires special handling on SQLite (#18247). """ self.assertEqual( len(Author.objects.annotate(sum=Sum('book_contact_set__price')).filter(sum__gt=Decimal(40))), 1 ) self.assertEqual( len(Author.objects.annotate(sum=Sum('book_contact_set__price')).filter(sum__lte=Decimal(40))), 4 ) def test_field_error(self): # Bad field requests in aggregates are caught and reported msg = ( "Cannot resolve keyword 'foo' into field. Choices are: authors, " "contact, contact_id, hardbackbook, id, isbn, name, pages, price, " "pubdate, publisher, publisher_id, rating, store, tags" ) with self.assertRaisesMessage(FieldError, msg): Book.objects.all().aggregate(num_authors=Count('foo')) with self.assertRaisesMessage(FieldError, msg): Book.objects.all().annotate(num_authors=Count('foo')) msg = ( "Cannot resolve keyword 'foo' into field. 
Choices are: authors, " "contact, contact_id, hardbackbook, id, isbn, name, num_authors, " "pages, price, pubdate, publisher, publisher_id, rating, store, tags" ) with self.assertRaisesMessage(FieldError, msg): Book.objects.all().annotate(num_authors=Count('authors__id')).aggregate(Max('foo')) @ignore_warnings(category=RemovedInDjango31Warning) def test_more(self): # Old-style count aggregations can be mixed with new-style self.assertEqual( Book.objects.annotate(num_authors=Count('authors')).count(), 6 ) # Aggregates over annotations correctly inherit the annotation's # internal type if the annotation is ordinal or computed vals = Book.objects.annotate(num_authors=Count('authors')).aggregate(Max('num_authors')) self.assertEqual( vals, {'num_authors__max': 3} ) vals = Publisher.objects.annotate(avg_price=Avg('book__price')).aggregate(Max('avg_price')) self.assertEqual( vals, {'avg_price__max': 75.0} ) # Aliases are quoted to protect aliases that might be reserved names vals = Book.objects.aggregate(number=Max('pages'), select=Max('pages')) self.assertEqual( vals, {'number': 1132, 'select': 1132} ) # Regression for #10064: select_related() plays nice with aggregates obj = Book.objects.select_related('publisher').annotate( num_authors=Count('authors')).values().get(isbn='013790395') self.assertEqual(obj, { 'contact_id': self.a8.id, 'id': self.b5.id, 'isbn': '013790395', 'name': 'Artificial Intelligence: A Modern Approach', 'num_authors': 2, 'pages': 1132, 'price': Decimal("82.8"), 'pubdate': datetime.date(1995, 1, 15), 'publisher_id': self.p3.id, 'rating': 4.0, }) # Regression for #10010: exclude on an aggregate field is correctly # negated self.assertEqual( len(Book.objects.annotate(num_authors=Count('authors'))), 6 ) self.assertEqual( len(Book.objects.annotate(num_authors=Count('authors')).filter(num_authors__gt=2)), 1 ) self.assertEqual( len(Book.objects.annotate(num_authors=Count('authors')).exclude(num_authors__gt=2)), 5 ) self.assertEqual( len( Book.objects .annotate(num_authors=Count('authors')) .filter(num_authors__lt=3) .exclude(num_authors__lt=2) ), 2 ) self.assertEqual( len( Book.objects .annotate(num_authors=Count('authors')) .exclude(num_authors__lt=2) .filter(num_authors__lt=3) ), 2 ) def test_aggregate_fexpr(self): # Aggregates can be used with F() expressions # ... where the F() is pushed into the HAVING clause qs = ( Publisher.objects .annotate(num_books=Count('book')) .filter(num_books__lt=F('num_awards') / 2) .order_by('name') .values('name', 'num_books', 'num_awards') ) self.assertSequenceEqual( qs, [ {'num_books': 1, 'name': 'Morgan Kaufmann', 'num_awards': 9}, {'num_books': 2, 'name': 'Prentice Hall', 'num_awards': 7} ], ) qs = ( Publisher.objects .annotate(num_books=Count('book')) .exclude(num_books__lt=F('num_awards') / 2) .order_by('name') .values('name', 'num_books', 'num_awards') ) self.assertSequenceEqual( qs, [ {'num_books': 2, 'name': 'Apress', 'num_awards': 3}, {'num_books': 0, 'name': "Jonno's House of Books", 'num_awards': 0}, {'num_books': 1, 'name': 'Sams', 'num_awards': 1} ], ) # ...
and where the F() references an aggregate qs = ( Publisher.objects .annotate(num_books=Count('book')) .filter(num_awards__gt=2 * F('num_books')) .order_by('name') .values('name', 'num_books', 'num_awards') ) self.assertSequenceEqual( qs, [ {'num_books': 1, 'name': 'Morgan Kaufmann', 'num_awards': 9}, {'num_books': 2, 'name': 'Prentice Hall', 'num_awards': 7} ], ) qs = ( Publisher.objects .annotate(num_books=Count('book')) .exclude(num_books__lt=F('num_awards') / 2) .order_by('name') .values('name', 'num_books', 'num_awards') ) self.assertSequenceEqual( qs, [ {'num_books': 2, 'name': 'Apress', 'num_awards': 3}, {'num_books': 0, 'name': "Jonno's House of Books", 'num_awards': 0}, {'num_books': 1, 'name': 'Sams', 'num_awards': 1} ], ) def test_db_col_table(self): # Tests on fields with non-default table and column names. qs = ( Clues.objects .values('EntryID__Entry') .annotate(Appearances=Count('EntryID'), Distinct_Clues=Count('Clue', distinct=True)) ) self.assertQuerysetEqual(qs, []) qs = Entries.objects.annotate(clue_count=Count('clues__ID')) self.assertQuerysetEqual(qs, []) def test_boolean_conversion(self): # Aggregates mixed up ordering of columns for backend's convert_values # method. Refs #21126. e = Entries.objects.create(Entry='foo') c = Clues.objects.create(EntryID=e, Clue='bar') qs = Clues.objects.select_related('EntryID').annotate(Count('ID')) self.assertSequenceEqual(qs, [c]) self.assertEqual(qs[0].EntryID, e) self.assertIs(qs[0].EntryID.Exclude, False) def test_empty(self): # Regression for #10089: Check handling of empty result sets with # aggregates self.assertEqual( Book.objects.filter(id__in=[]).count(), 0 ) vals = ( Book.objects .filter(id__in=[]) .aggregate( num_authors=Count('authors'), avg_authors=Avg('authors'), max_authors=Max('authors'), max_price=Max('price'), max_rating=Max('rating'), ) ) self.assertEqual( vals, {'max_authors': None, 'max_rating': None, 'num_authors': 0, 'avg_authors': None, 'max_price': None} ) qs = ( Publisher.objects .filter(name="Jonno's House of Books") .annotate( num_authors=Count('book__authors'), avg_authors=Avg('book__authors'), max_authors=Max('book__authors'), max_price=Max('book__price'), max_rating=Max('book__rating'), ).values() ) self.assertSequenceEqual( qs, [{ 'max_authors': None, 'name': "Jonno's House of Books", 'num_awards': 0, 'max_price': None, 'num_authors': 0, 'max_rating': None, 'id': self.p5.id, 'avg_authors': None, }], ) def test_more_more(self): # Regression for #10113 - Fields mentioned in order_by() must be # included in the GROUP BY. This only becomes a problem when the # order_by introduces a new join. 
self.assertQuerysetEqual( Book.objects.annotate(num_authors=Count('authors')).order_by('publisher__name', 'name'), [ "Practical Django Projects", "The Definitive Guide to Django: Web Development Done Right", "Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp", "Artificial Intelligence: A Modern Approach", "Python Web Development with Django", "Sams Teach Yourself Django in 24 Hours", ], lambda b: b.name ) # Regression for #10127 - Empty select_related() works with annotate qs = Book.objects.filter(rating__lt=4.5).select_related().annotate(Avg('authors__age')).order_by('name') self.assertQuerysetEqual( qs, [ ('Artificial Intelligence: A Modern Approach', 51.5, 'Prentice Hall', 'Peter Norvig'), ('Practical Django Projects', 29.0, 'Apress', 'James Bennett'), ( 'Python Web Development with Django', Approximate(30.333, places=2), 'Prentice Hall', 'Jeffrey Forcier', ), ('Sams Teach Yourself Django in 24 Hours', 45.0, 'Sams', 'Brad Dayley') ], lambda b: (b.name, b.authors__age__avg, b.publisher.name, b.contact.name) ) # Regression for #10132 - If the values() clause only mentioned extra # (select=) columns, those columns are used for grouping qs = Book.objects.extra(select={'pub': 'publisher_id'}).values('pub').annotate(Count('id')).order_by('pub') self.assertSequenceEqual( qs, [ {'pub': self.p1.id, 'id__count': 2}, {'pub': self.p2.id, 'id__count': 1}, {'pub': self.p3.id, 'id__count': 2}, {'pub': self.p4.id, 'id__count': 1} ], ) qs = ( Book.objects .extra(select={'pub': 'publisher_id', 'foo': 'pages'}) .values('pub') .annotate(Count('id')) .order_by('pub') ) self.assertSequenceEqual( qs, [ {'pub': self.p1.id, 'id__count': 2}, {'pub': self.p2.id, 'id__count': 1}, {'pub': self.p3.id, 'id__count': 2}, {'pub': self.p4.id, 'id__count': 1} ], ) # Regression for #10182 - Queries with aggregate calls are correctly # realiased when used in a subquery ids = ( Book.objects .filter(pages__gt=100) .annotate(n_authors=Count('authors')) .filter(n_authors__gt=2) .order_by('n_authors') ) self.assertQuerysetEqual( Book.objects.filter(id__in=ids), [ "Python Web Development with Django", ], lambda b: b.name ) # Regression for #15709 - Ensure each group_by field only exists once # per query qstr = str(Book.objects.values('publisher').annotate(max_pages=Max('pages')).order_by().query) # There is just one GROUP BY clause (zero commas means at most one clause). self.assertEqual(qstr[qstr.index('GROUP BY'):].count(', '), 0) def test_duplicate_alias(self): # Regression for #11256 - duplicating a default alias raises ValueError. msg = ( "The named annotation 'authors__age__avg' conflicts with " "the default name for another annotation." ) with self.assertRaisesMessage(ValueError, msg): Book.objects.all().annotate(Avg('authors__age'), authors__age__avg=Avg('authors__age')) def test_field_name_conflict(self): # Regression for #11256 - providing an aggregate name # that conflicts with a field name on the model raises ValueError msg = "The annotation 'age' conflicts with a field on the model." with self.assertRaisesMessage(ValueError, msg): Author.objects.annotate(age=Avg('friends__age')) def test_m2m_name_conflict(self): # Regression for #11256 - providing an aggregate name # that conflicts with an m2m name on the model raises ValueError msg = "The annotation 'friends' conflicts with a field on the model."
with self.assertRaisesMessage(ValueError, msg): Author.objects.annotate(friends=Count('friends')) def test_fk_attname_conflict(self): msg = "The annotation 'contact_id' conflicts with a field on the model." with self.assertRaisesMessage(ValueError, msg): Book.objects.annotate(contact_id=F('publisher_id')) def test_values_queryset_non_conflict(self): # Regression for #14707 -- If you're using a values query set, some potential conflicts are avoided. # age is a field on Author, so it shouldn't be allowed as an aggregate. # But age isn't included in values(), so it is. results = Author.objects.values('name').annotate(age=Count('book_contact_set')).order_by('name') self.assertEqual(len(results), 9) self.assertEqual(results[0]['name'], 'Adrian Holovaty') self.assertEqual(results[0]['age'], 1) # Same problem, but aggregating over m2m fields results = Author.objects.values('name').annotate(age=Avg('friends__age')).order_by('name') self.assertEqual(len(results), 9) self.assertEqual(results[0]['name'], 'Adrian Holovaty') self.assertEqual(results[0]['age'], 32.0) # Same problem, but colliding with an m2m field results = Author.objects.values('name').annotate(friends=Count('friends')).order_by('name') self.assertEqual(len(results), 9) self.assertEqual(results[0]['name'], 'Adrian Holovaty') self.assertEqual(results[0]['friends'], 2) def test_reverse_relation_name_conflict(self): # Regression for #11256 - providing an aggregate name # that conflicts with a reverse-related name on the model raises ValueError msg = "The annotation 'book_contact_set' conflicts with a field on the model." with self.assertRaisesMessage(ValueError, msg): Author.objects.annotate(book_contact_set=Avg('friends__age')) @ignore_warnings(category=RemovedInDjango31Warning) def test_pickle(self): # Regression for #10197 -- Queries with aggregates can be pickled. # First check that pickling is possible at all. No crash = success qs = Book.objects.annotate(num_authors=Count('authors')) pickle.dumps(qs) # Then check that the round trip works. query = qs.query.get_compiler(qs.db).as_sql()[0] qs2 = pickle.loads(pickle.dumps(qs)) self.assertEqual( qs2.query.get_compiler(qs2.db).as_sql()[0], query, ) def test_more_more_more(self): # Regression for #10199 - Aggregate calls clone the original query so # the original query can still be used books = Book.objects.all() books.aggregate(Avg("authors__age")) self.assertQuerysetEqual( books.all(), [ 'Artificial Intelligence: A Modern Approach', 'Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp', 'Practical Django Projects', 'Python Web Development with Django', 'Sams Teach Yourself Django in 24 Hours', 'The Definitive Guide to Django: Web Development Done Right' ], lambda b: b.name ) # Regression for #10248 - Annotations work with dates() qs = Book.objects.annotate(num_authors=Count('authors')).filter(num_authors=2).dates('pubdate', 'day') self.assertSequenceEqual( qs, [ datetime.date(1995, 1, 15), datetime.date(2007, 12, 6), ], ) # Regression for #10290 - extra selects with parameters can be used for # grouping. 
qs = ( Book.objects .annotate(mean_auth_age=Avg('authors__age')) .extra(select={'sheets': '(pages + %s) / %s'}, select_params=[1, 2]) .order_by('sheets') .values('sheets') ) self.assertQuerysetEqual( qs, [ 150, 175, 224, 264, 473, 566 ], lambda b: int(b["sheets"]) ) # Regression for #10425 - annotations don't get in the way of a count() # clause self.assertEqual( Book.objects.values('publisher').annotate(Count('publisher')).count(), 4 ) self.assertEqual( Book.objects.annotate(Count('publisher')).values('publisher').count(), 6 ) # Note: intentionally no order_by(), that case needs tests, too. publishers = Publisher.objects.filter(id__in=[self.p1.id, self.p2.id]) self.assertEqual( sorted(p.name for p in publishers), [ "Apress", "Sams" ] ) publishers = publishers.annotate(n_books=Count("book")) sorted_publishers = sorted(publishers, key=lambda x: x.name) self.assertEqual( sorted_publishers[0].n_books, 2 ) self.assertEqual( sorted_publishers[1].n_books, 1 ) self.assertEqual( sorted(p.name for p in publishers), [ "Apress", "Sams" ] ) books = Book.objects.filter(publisher__in=publishers) self.assertQuerysetEqual( books, [ "Practical Django Projects", "Sams Teach Yourself Django in 24 Hours", "The Definitive Guide to Django: Web Development Done Right", ], lambda b: b.name ) self.assertEqual( sorted(p.name for p in publishers), [ "Apress", "Sams" ] ) # Regression for #10666 - inherited fields work with annotations and # aggregations self.assertEqual( HardbackBook.objects.aggregate(n_pages=Sum('book_ptr__pages')), {'n_pages': 2078} ) self.assertEqual( HardbackBook.objects.aggregate(n_pages=Sum('pages')), {'n_pages': 2078}, ) qs = HardbackBook.objects.annotate( n_authors=Count('book_ptr__authors'), ).values('name', 'n_authors').order_by('name') self.assertSequenceEqual( qs, [ {'n_authors': 2, 'name': 'Artificial Intelligence: A Modern Approach'}, { 'n_authors': 1, 'name': 'Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp' } ], ) qs = HardbackBook.objects.annotate(n_authors=Count('authors')).values('name', 'n_authors').order_by('name') self.assertSequenceEqual( qs, [ {'n_authors': 2, 'name': 'Artificial Intelligence: A Modern Approach'}, { 'n_authors': 1, 'name': 'Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp' } ], ) # Regression for #10766 - Shouldn't be able to reference an aggregate # field in an aggregate() call. msg = "Cannot compute Avg('mean_age'): 'mean_age' is an aggregate" with self.assertRaisesMessage(FieldError, msg): Book.objects.annotate(mean_age=Avg('authors__age')).annotate(Avg('mean_age')) def test_empty_filter_count(self): self.assertEqual( Author.objects.filter(id__in=[]).annotate(Count("friends")).count(), 0 ) def test_empty_filter_aggregate(self): self.assertEqual( Author.objects.filter(id__in=[]).annotate(Count("friends")).aggregate(Count("pk")), {"pk__count": None} ) def test_none_call_before_aggregate(self): # Regression for #11789 self.assertEqual( Author.objects.none().aggregate(Avg('age')), {'age__avg': None} ) def test_annotate_and_join(self): self.assertEqual( Author.objects.annotate(c=Count("friends__name")).exclude(friends__name="Joe").count(), Author.objects.count() ) def test_f_expression_annotation(self): # Books with less than 200 pages per author.
qs = Book.objects.values("name").annotate( n_authors=Count("authors") ).filter( pages__lt=F("n_authors") * 200 ).values_list("pk") self.assertQuerysetEqual( Book.objects.filter(pk__in=qs), [ "Python Web Development with Django" ], attrgetter("name") ) def test_values_annotate_values(self): qs = Book.objects.values("name").annotate( n_authors=Count("authors") ).values_list("pk", flat=True).order_by('name') self.assertEqual(list(qs), list(Book.objects.values_list("pk", flat=True))) def test_having_group_by(self): # When a field occurs on the LHS of a HAVING clause, it must also # appear correctly in the GROUP BY clause qs = Book.objects.values_list("name").annotate( n_authors=Count("authors") ).filter( pages__gt=F("n_authors") ).values_list("name", flat=True).order_by('name') # Results should be the same, all Books have more pages than authors self.assertEqual( list(qs), list(Book.objects.values_list("name", flat=True)) ) def test_values_list_annotation_args_ordering(self): """ Annotate *args ordering should be preserved in values_list results. **kwargs comes after *args. Regression test for #23659. """ books = Book.objects.values_list("publisher__name").annotate( Count("id"), Avg("price"), Avg("authors__age"), avg_pgs=Avg("pages") ).order_by("-publisher__name") self.assertEqual(books[0], ('Sams', 1, Decimal('23.09'), 45.0, 528.0)) def test_annotation_disjunction(self): qs = Book.objects.annotate(n_authors=Count("authors")).filter( Q(n_authors=2) | Q(name="Python Web Development with Django") ).order_by('name') self.assertQuerysetEqual( qs, [ "Artificial Intelligence: A Modern Approach", "Python Web Development with Django", "The Definitive Guide to Django: Web Development Done Right", ], attrgetter("name") ) qs = ( Book.objects .annotate(n_authors=Count("authors")) .filter( Q(name="The Definitive Guide to Django: Web Development Done Right") | (Q(name="Artificial Intelligence: A Modern Approach") & Q(n_authors=3)) ) ).order_by('name') self.assertQuerysetEqual( qs, [ "The Definitive Guide to Django: Web Development Done Right", ], attrgetter("name") ) qs = Publisher.objects.annotate( rating_sum=Sum("book__rating"), book_count=Count("book") ).filter( Q(rating_sum__gt=5.5) | Q(rating_sum__isnull=True) ).order_by('pk') self.assertQuerysetEqual( qs, [ "Apress", "Prentice Hall", "Jonno's House of Books", ], attrgetter("name") ) qs = Publisher.objects.annotate( rating_sum=Sum("book__rating"), book_count=Count("book") ).filter( Q(rating_sum__gt=F("book_count")) | Q(rating_sum=None) ).order_by("num_awards") self.assertQuerysetEqual( qs, [ "Jonno's House of Books", "Sams", "Apress", "Prentice Hall", "Morgan Kaufmann" ], attrgetter("name") ) def test_quoting_aggregate_order_by(self): qs = Book.objects.filter( name="Python Web Development with Django" ).annotate( authorCount=Count("authors") ).order_by("authorCount") self.assertQuerysetEqual( qs, [ ("Python Web Development with Django", 3), ], lambda b: (b.name, b.authorCount) ) def test_stddev(self): self.assertEqual( Book.objects.aggregate(StdDev('pages')), {'pages__stddev': Approximate(311.46, 1)} ) self.assertEqual( Book.objects.aggregate(StdDev('rating')), {'rating__stddev': Approximate(0.60, 1)} ) self.assertEqual( Book.objects.aggregate(StdDev('price')), {'price__stddev': Approximate(Decimal('24.16'), 2)} ) self.assertEqual( Book.objects.aggregate(StdDev('pages', sample=True)), {'pages__stddev': Approximate(341.19, 2)} ) self.assertEqual( Book.objects.aggregate(StdDev('rating', sample=True)), {'rating__stddev': Approximate(0.66, 2)} )
self.assertEqual( Book.objects.aggregate(StdDev('price', sample=True)), {'price__stddev': Approximate(Decimal('26.46'), 1)} ) self.assertEqual( Book.objects.aggregate(Variance('pages')), {'pages__variance': Approximate(97010.80, 1)} ) self.assertEqual( Book.objects.aggregate(Variance('rating')), {'rating__variance': Approximate(0.36, 1)} ) self.assertEqual( Book.objects.aggregate(Variance('price')), {'price__variance': Approximate(Decimal('583.77'), 1)} ) self.assertEqual( Book.objects.aggregate(Variance('pages', sample=True)), {'pages__variance': Approximate(116412.96, 1)} ) self.assertEqual( Book.objects.aggregate(Variance('rating', sample=True)), {'rating__variance': Approximate(0.44, 2)} ) self.assertEqual( Book.objects.aggregate(Variance('price', sample=True)), {'price__variance': Approximate(Decimal('700.53'), 2)} ) def test_filtering_by_annotation_name(self): # Regression test for #14476 # The explicitly provided annotation name in this case # poses no problem qs = Author.objects.annotate(book_cnt=Count('book')).filter(book_cnt=2).order_by('name') self.assertQuerysetEqual( qs, ['Peter Norvig'], lambda b: b.name ) # Neither in this case qs = Author.objects.annotate(book_count=Count('book')).filter(book_count=2).order_by('name') self.assertQuerysetEqual( qs, ['Peter Norvig'], lambda b: b.name ) # This case used to fail because the ORM couldn't resolve the # automatically generated annotation name `book__count` qs = Author.objects.annotate(Count('book')).filter(book__count=2).order_by('name') self.assertQuerysetEqual( qs, ['Peter Norvig'], lambda b: b.name ) # Referencing the auto-generated name in an aggregate() also works. self.assertEqual( Author.objects.annotate(Count('book')).aggregate(Max('book__count')), {'book__count__max': 2} ) @ignore_warnings(category=RemovedInDjango31Warning) def test_annotate_joins(self): """ The base table's join isn't promoted to LOUTER. This could cause the query generation to fail if there is an exclude() for an fk field in the query, too. Refs #19087. """ qs = Book.objects.annotate(n=Count('pk')) self.assertIs(qs.query.alias_map['aggregation_regress_book'].join_type, None) # The query executes without problems. self.assertEqual(len(qs.exclude(publisher=-1)), 6) @skipUnlessAnyDBFeature('allows_group_by_pk', 'allows_group_by_selected_pks') def test_aggregate_duplicate_columns(self): # Regression test for #17144 results = Author.objects.annotate(num_contacts=Count('book_contact_set')) # There should only be one GROUP BY clause, for the `id` column. # `name` and `age` should not be grouped on. _, _, group_by = results.query.get_compiler(using='default').pre_sql_setup() self.assertEqual(len(group_by), 1) self.assertIn('id', group_by[0][0]) self.assertNotIn('name', group_by[0][0]) self.assertNotIn('age', group_by[0][0]) self.assertEqual( [(a.name, a.num_contacts) for a in results.order_by('name')], [ ('Adrian Holovaty', 1), ('Brad Dayley', 1), ('Jacob Kaplan-Moss', 0), ('James Bennett', 1), ('Jeffrey Forcier', 1), ('Paul Bissex', 0), ('Peter Norvig', 2), ('Stuart Russell', 0), ('Wesley J. Chun', 0), ] ) @skipUnlessAnyDBFeature('allows_group_by_pk', 'allows_group_by_selected_pks') def test_aggregate_duplicate_columns_only(self): # Works with only() too.
results = Author.objects.only('id', 'name').annotate(num_contacts=Count('book_contact_set')) _, _, grouping = results.query.get_compiler(using='default').pre_sql_setup() self.assertEqual(len(grouping), 1) self.assertIn('id', grouping[0][0]) self.assertNotIn('name', grouping[0][0]) self.assertNotIn('age', grouping[0][0]) self.assertEqual( [(a.name, a.num_contacts) for a in results.order_by('name')], [ ('Adrian Holovaty', 1), ('Brad Dayley', 1), ('Jacob Kaplan-Moss', 0), ('James Bennett', 1), ('Jeffrey Forcier', 1), ('Paul Bissex', 0), ('Peter Norvig', 2), ('Stuart Russell', 0), ('Wesley J. Chun', 0), ] ) @skipUnlessAnyDBFeature('allows_group_by_pk', 'allows_group_by_selected_pks') def test_aggregate_duplicate_columns_select_related(self): # And select_related() results = Book.objects.select_related('contact').annotate( num_authors=Count('authors')) _, _, grouping = results.query.get_compiler(using='default').pre_sql_setup() # In the case of `group_by_selected_pks` we also group by contact.id because of the select_related. self.assertEqual(len(grouping), 1 if connection.features.allows_group_by_pk else 2) self.assertIn('id', grouping[0][0]) self.assertNotIn('name', grouping[0][0]) self.assertNotIn('contact', grouping[0][0]) self.assertEqual( [(b.name, b.num_authors) for b in results.order_by('name')], [ ('Artificial Intelligence: A Modern Approach', 2), ('Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp', 1), ('Practical Django Projects', 1), ('Python Web Development with Django', 3), ('Sams Teach Yourself Django in 24 Hours', 1), ('The Definitive Guide to Django: Web Development Done Right', 2) ] ) @skipUnlessDBFeature('allows_group_by_selected_pks') def test_aggregate_unmanaged_model_columns(self): """ Unmanaged models are sometimes used to represent database views which may not allow grouping by selected primary key. """ def assertQuerysetResults(queryset): self.assertEqual( [(b.name, b.num_authors) for b in queryset.order_by('name')], [ ('Artificial Intelligence: A Modern Approach', 2), ('Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp', 1), ('Practical Django Projects', 1), ('Python Web Development with Django', 3), ('Sams Teach Yourself Django in 24 Hours', 1), ('The Definitive Guide to Django: Web Development Done Right', 2), ] ) queryset = Book.objects.select_related('contact').annotate(num_authors=Count('authors')) # Unmanaged origin model. with mock.patch.object(Book._meta, 'managed', False): _, _, grouping = queryset.query.get_compiler(using='default').pre_sql_setup() self.assertEqual(len(grouping), len(Book._meta.fields) + 1) for index, field in enumerate(Book._meta.fields): self.assertIn(field.name, grouping[index][0]) self.assertIn(Author._meta.pk.name, grouping[-1][0]) assertQuerysetResults(queryset) # Unmanaged related model.
with mock.patch.object(Author._meta, 'managed', False): _, _, grouping = queryset.query.get_compiler(using='default').pre_sql_setup() self.assertEqual(len(grouping), len(Author._meta.fields) + 1) self.assertIn(Book._meta.pk.name, grouping[0][0]) for index, field in enumerate(Author._meta.fields): self.assertIn(field.name, grouping[index + 1][0]) assertQuerysetResults(queryset) def test_reverse_join_trimming(self): qs = Author.objects.annotate(Count('book_contact_set__contact')) self.assertIn(' JOIN ', str(qs.query)) def test_aggregation_with_generic_reverse_relation(self): """ Regression test for #10870: Aggregates with joins ignore extra filters provided by setup_joins tests aggregations with generic reverse relations """ django_book = Book.objects.get(name='Practical Django Projects') ItemTag.objects.create( object_id=django_book.id, tag='intermediate', content_type=ContentType.objects.get_for_model(django_book), ) ItemTag.objects.create( object_id=django_book.id, tag='django', content_type=ContentType.objects.get_for_model(django_book), ) # Assign a tag to model with same PK as the book above. If the JOIN # used in aggregation doesn't have content type as part of the # condition the annotation will also count the 'hi mom' tag for b. wmpk = WithManualPK.objects.create(id=django_book.pk) ItemTag.objects.create( object_id=wmpk.id, tag='hi mom', content_type=ContentType.objects.get_for_model(wmpk), ) ai_book = Book.objects.get(name__startswith='Paradigms of Artificial Intelligence') ItemTag.objects.create( object_id=ai_book.id, tag='intermediate', content_type=ContentType.objects.get_for_model(ai_book), ) self.assertEqual(Book.objects.aggregate(Count('tags')), {'tags__count': 3}) results = Book.objects.annotate(Count('tags')).order_by('-tags__count', 'name') self.assertEqual( [(b.name, b.tags__count) for b in results], [ ('Practical Django Projects', 2), ('Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp', 1), ('Artificial Intelligence: A Modern Approach', 0), ('Python Web Development with Django', 0), ('Sams Teach Yourself Django in 24 Hours', 0), ('The Definitive Guide to Django: Web Development Done Right', 0) ] ) def test_negated_aggregation(self): expected_results = Author.objects.exclude( pk__in=Author.objects.annotate(book_cnt=Count('book')).filter(book_cnt=2) ).order_by('name') expected_results = [a.name for a in expected_results] qs = Author.objects.annotate(book_cnt=Count('book')).exclude( Q(book_cnt=2), Q(book_cnt=2)).order_by('name') self.assertQuerysetEqual( qs, expected_results, lambda b: b.name ) expected_results = Author.objects.exclude( pk__in=Author.objects.annotate(book_cnt=Count('book')).filter(book_cnt=2) ).order_by('name') expected_results = [a.name for a in expected_results] qs = Author.objects.annotate(book_cnt=Count('book')).exclude(Q(book_cnt=2) | Q(book_cnt=2)).order_by('name') self.assertQuerysetEqual( qs, expected_results, lambda b: b.name ) def test_name_filters(self): qs = Author.objects.annotate(Count('book')).filter( Q(book__count__exact=2) | Q(name='Adrian Holovaty') ).order_by('name') self.assertQuerysetEqual( qs, ['Adrian Holovaty', 'Peter Norvig'], lambda b: b.name ) def test_name_expressions(self): # Aggregates are spotted correctly from F objects. # Note that Adrian's age is 34 in the fixtures, and he has one book # so both conditions match one author. 
qs = Author.objects.annotate(Count('book')).filter( Q(name='Peter Norvig') | Q(age=F('book__count') + 33) ).order_by('name') self.assertQuerysetEqual( qs, ['Adrian Holovaty', 'Peter Norvig'], lambda b: b.name ) def test_ticket_11293(self): q1 = Q(price__gt=50) q2 = Q(authors__count__gt=1) query = Book.objects.annotate(Count('authors')).filter( q1 | q2).order_by('pk') self.assertQuerysetEqual( query, [1, 4, 5, 6], lambda b: b.pk) def test_ticket_11293_q_immutable(self): """ Splitting a q object to parts for where/having doesn't alter the original q-object. """ q1 = Q(isbn='') q2 = Q(authors__count__gt=1) query = Book.objects.annotate(Count('authors')) query.filter(q1 | q2) self.assertEqual(len(q2.children), 1) @ignore_warnings(category=RemovedInDjango31Warning) def test_fobj_group_by(self): """ An F() object referring to related column works correctly in group by. """ qs = Book.objects.annotate( account=Count('authors') ).filter( account=F('publisher__num_awards') ) self.assertQuerysetEqual( qs, ['Sams Teach Yourself Django in 24 Hours'], lambda b: b.name) def test_annotate_reserved_word(self): """ Regression #18333 - Ensure annotated column name is properly quoted. """ vals = Book.objects.annotate(select=Count('authors__id')).aggregate(Sum('select'), Avg('select')) self.assertEqual(vals, { 'select__sum': 10, 'select__avg': Approximate(1.666, places=2), }) def test_annotate_on_relation(self): book = Book.objects.annotate(avg_price=Avg('price'), publisher_name=F('publisher__name')).get(pk=self.b1.pk) self.assertEqual(book.avg_price, 30.00) self.assertEqual(book.publisher_name, "Apress") def test_aggregate_on_relation(self): # A query with an existing annotation aggregation on a relation should # succeed. qs = Book.objects.annotate(avg_price=Avg('price')).aggregate( publisher_awards=Sum('publisher__num_awards') ) self.assertEqual(qs['publisher_awards'], 30) def test_annotate_distinct_aggregate(self): # There are three books with rating of 4.0 and two of the books have # the same price. Hence, the distinct removes one rating of 4.0 # from the results. vals1 = Book.objects.values('rating', 'price').distinct().aggregate(result=Sum('rating')) vals2 = Book.objects.aggregate(result=Sum('rating') - Value(4.0)) self.assertEqual(vals1, vals2) def test_annotate_values_list_flat(self): """Find ages that are shared by at least two authors.""" qs = Author.objects.values_list('age', flat=True).annotate(age_count=Count('age')).filter(age_count__gt=1) self.assertSequenceEqual(qs, [29]) def test_allow_distinct(self): class MyAggregate(Aggregate): pass with self.assertRaisesMessage(TypeError, 'MyAggregate does not allow distinct'): MyAggregate('foo', distinct=True) class DistinctAggregate(Aggregate): allow_distinct = True DistinctAggregate('foo', distinct=True) class JoinPromotionTests(TestCase): def test_ticket_21150(self): b = Bravo.objects.create() c = Charlie.objects.create(bravo=b) qs = Charlie.objects.select_related('alfa').annotate(Count('bravo__charlie')) self.assertSequenceEqual(qs, [c]) self.assertIs(qs[0].alfa, None) a = Alfa.objects.create() c.alfa = a c.save() # Force re-evaluation qs = qs.all() self.assertSequenceEqual(qs, [c]) self.assertEqual(qs[0].alfa, a) def test_existing_join_not_promoted(self): # No promotion for existing joins qs = Charlie.objects.filter(alfa__name__isnull=False).annotate(Count('alfa__name')) self.assertIn(' INNER JOIN ', str(qs.query)) # Also, the existing join is unpromoted when doing filtering for already # promoted join. 
qs = Charlie.objects.annotate(Count('alfa__name')).filter(alfa__name__isnull=False) self.assertIn(' INNER JOIN ', str(qs.query)) # But, as the join is nullable first use by annotate will be LOUTER qs = Charlie.objects.annotate(Count('alfa__name')) self.assertIn(' LEFT OUTER JOIN ', str(qs.query)) @ignore_warnings(category=RemovedInDjango31Warning) def test_non_nullable_fk_not_promoted(self): qs = Book.objects.annotate(Count('contact__name')) self.assertIn(' INNER JOIN ', str(qs.query)) class SelfReferentialFKTests(TestCase): def test_ticket_24748(self): t1 = SelfRefFK.objects.create(name='t1') SelfRefFK.objects.create(name='t2', parent=t1) SelfRefFK.objects.create(name='t3', parent=t1) self.assertQuerysetEqual( SelfRefFK.objects.annotate(num_children=Count('children')).order_by('name'), [('t1', 2), ('t2', 0), ('t3', 0)], lambda x: (x.name, x.num_children) )
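# Editor's sketch (not part of the original suite): the join-promotion rules
# exercised by JoinPromotionTests, demonstrated against an in-memory SQLite
# database. DemoAlfa/DemoCharlie and the 'demo' app label are illustrative
# stand-ins for the Alfa/Charlie fixtures above; paste this into a standalone
# script, since this module's own relative imports prevent running it directly.
if __name__ == '__main__':
    import django
    from django.conf import settings

    settings.configure(
        INSTALLED_APPS=['django.contrib.contenttypes'],
        DATABASES={'default': {'ENGINE': 'django.db.backends.sqlite3', 'NAME': ':memory:'}},
    )
    django.setup()

    from django.db import models
    from django.db.models import Count

    class DemoAlfa(models.Model):
        name = models.CharField(max_length=10)

        class Meta:
            app_label = 'demo'

    class DemoCharlie(models.Model):
        alfa = models.ForeignKey(DemoAlfa, null=True, on_delete=models.SET_NULL)

        class Meta:
            app_label = 'demo'

    # A nullable FK whose first use is an annotation is promoted to LEFT OUTER JOIN...
    assert ' LEFT OUTER JOIN ' in str(DemoCharlie.objects.annotate(Count('alfa__name')).query)
    # ...while filtering on the same relation demotes the join back to INNER.
    assert ' INNER JOIN ' in str(
        DemoCharlie.objects.annotate(Count('alfa__name')).filter(alfa__name__isnull=False).query
    )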
9d92822949da01c73948bcdfa8eaa991d42f5f1c6651179cab17eca6b1d63fa6
import decimal import enum import json import unittest import uuid from django import forms from django.core import checks, exceptions, serializers, validators from django.core.exceptions import FieldError from django.core.management import call_command from django.db import IntegrityError, connection, models from django.db.models.expressions import RawSQL from django.db.models.functions import Cast from django.test import TransactionTestCase, modify_settings, override_settings from django.test.utils import isolate_apps from django.utils import timezone from . import ( PostgreSQLSimpleTestCase, PostgreSQLTestCase, PostgreSQLWidgetTestCase, ) from .models import ( ArrayEnumModel, ArrayFieldSubclass, CharArrayModel, DateTimeArrayModel, IntegerArrayModel, NestedIntegerArrayModel, NullableIntegerArrayModel, OtherTypesArrayModel, PostgreSQLModel, Tag, ) try: from django.contrib.postgres.fields import ArrayField from django.contrib.postgres.fields.array import IndexTransform, SliceTransform from django.contrib.postgres.forms import ( SimpleArrayField, SplitArrayField, SplitArrayWidget, ) from psycopg2.extras import NumericRange except ImportError: pass class TestSaveLoad(PostgreSQLTestCase): def test_integer(self): instance = IntegerArrayModel(field=[1, 2, 3]) instance.save() loaded = IntegerArrayModel.objects.get() self.assertEqual(instance.field, loaded.field) def test_char(self): instance = CharArrayModel(field=['hello', 'goodbye']) instance.save() loaded = CharArrayModel.objects.get() self.assertEqual(instance.field, loaded.field) def test_dates(self): instance = DateTimeArrayModel( datetimes=[timezone.now()], dates=[timezone.now().date()], times=[timezone.now().time()], ) instance.save() loaded = DateTimeArrayModel.objects.get() self.assertEqual(instance.datetimes, loaded.datetimes) self.assertEqual(instance.dates, loaded.dates) self.assertEqual(instance.times, loaded.times) def test_tuples(self): instance = IntegerArrayModel(field=(1,)) instance.save() loaded = IntegerArrayModel.objects.get() self.assertSequenceEqual(instance.field, loaded.field) def test_integers_passed_as_strings(self): # This checks that get_prep_value is deferred properly instance = IntegerArrayModel(field=['1']) instance.save() loaded = IntegerArrayModel.objects.get() self.assertEqual(loaded.field, [1]) def test_default_null(self): instance = NullableIntegerArrayModel() instance.save() loaded = NullableIntegerArrayModel.objects.get(pk=instance.pk) self.assertIsNone(loaded.field) self.assertEqual(instance.field, loaded.field) def test_null_handling(self): instance = NullableIntegerArrayModel(field=None) instance.save() loaded = NullableIntegerArrayModel.objects.get() self.assertEqual(instance.field, loaded.field) instance = IntegerArrayModel(field=None) with self.assertRaises(IntegrityError): instance.save() def test_nested(self): instance = NestedIntegerArrayModel(field=[[1, 2], [3, 4]]) instance.save() loaded = NestedIntegerArrayModel.objects.get() self.assertEqual(instance.field, loaded.field) def test_other_array_types(self): instance = OtherTypesArrayModel( ips=['192.168.0.1', '::1'], uuids=[uuid.uuid4()], decimals=[decimal.Decimal(1.25), 1.75], tags=[Tag(1), Tag(2), Tag(3)], json=[{'a': 1}, {'b': 2}], int_ranges=[NumericRange(10, 20), NumericRange(30, 40)], bigint_ranges=[ NumericRange(7000000000, 10000000000), NumericRange(50000000000, 70000000000), ] ) instance.save() loaded = OtherTypesArrayModel.objects.get() self.assertEqual(instance.ips, loaded.ips) self.assertEqual(instance.uuids, loaded.uuids) 
self.assertEqual(instance.decimals, loaded.decimals) self.assertEqual(instance.tags, loaded.tags) self.assertEqual(instance.json, loaded.json) self.assertEqual(instance.int_ranges, loaded.int_ranges) self.assertEqual(instance.bigint_ranges, loaded.bigint_ranges) def test_null_from_db_value_handling(self): instance = OtherTypesArrayModel.objects.create( ips=['192.168.0.1', '::1'], uuids=[uuid.uuid4()], decimals=[decimal.Decimal(1.25), 1.75], tags=None, ) instance.refresh_from_db() self.assertIsNone(instance.tags) self.assertEqual(instance.json, []) self.assertIsNone(instance.int_ranges) self.assertIsNone(instance.bigint_ranges) def test_model_set_on_base_field(self): instance = IntegerArrayModel() field = instance._meta.get_field('field') self.assertEqual(field.model, IntegerArrayModel) self.assertEqual(field.base_field.model, IntegerArrayModel) class TestQuerying(PostgreSQLTestCase): @classmethod def setUpTestData(cls): cls.objs = NullableIntegerArrayModel.objects.bulk_create([ NullableIntegerArrayModel(field=[1]), NullableIntegerArrayModel(field=[2]), NullableIntegerArrayModel(field=[2, 3]), NullableIntegerArrayModel(field=[20, 30, 40]), NullableIntegerArrayModel(field=None), ]) def test_empty_list(self): NullableIntegerArrayModel.objects.create(field=[]) obj = NullableIntegerArrayModel.objects.annotate( empty_array=models.Value([], output_field=ArrayField(models.IntegerField())), ).filter(field=models.F('empty_array')).get() self.assertEqual(obj.field, []) self.assertEqual(obj.empty_array, []) def test_exact(self): self.assertSequenceEqual( NullableIntegerArrayModel.objects.filter(field__exact=[1]), self.objs[:1] ) def test_exact_charfield(self): instance = CharArrayModel.objects.create(field=['text']) self.assertSequenceEqual( CharArrayModel.objects.filter(field=['text']), [instance] ) def test_exact_nested(self): instance = NestedIntegerArrayModel.objects.create(field=[[1, 2], [3, 4]]) self.assertSequenceEqual( NestedIntegerArrayModel.objects.filter(field=[[1, 2], [3, 4]]), [instance] ) def test_isnull(self): self.assertSequenceEqual( NullableIntegerArrayModel.objects.filter(field__isnull=True), self.objs[-1:] ) def test_gt(self): self.assertSequenceEqual( NullableIntegerArrayModel.objects.filter(field__gt=[0]), self.objs[:4] ) def test_lt(self): self.assertSequenceEqual( NullableIntegerArrayModel.objects.filter(field__lt=[2]), self.objs[:1] ) def test_in(self): self.assertSequenceEqual( NullableIntegerArrayModel.objects.filter(field__in=[[1], [2]]), self.objs[:2] ) def test_in_subquery(self): IntegerArrayModel.objects.create(field=[2, 3]) self.assertSequenceEqual( NullableIntegerArrayModel.objects.filter( field__in=IntegerArrayModel.objects.all().values_list('field', flat=True) ), self.objs[2:3] ) @unittest.expectedFailure def test_in_including_F_object(self): # This test asserts that Array objects passed to filters can be # constructed to contain F objects. This currently doesn't work as the # psycopg2 mogrify method that generates the ARRAY() syntax is # expecting literals, not column references (#27095). 
self.assertSequenceEqual( NullableIntegerArrayModel.objects.filter(field__in=[[models.F('id')]]), self.objs[:2] ) def test_in_as_F_object(self): self.assertSequenceEqual( NullableIntegerArrayModel.objects.filter(field__in=[models.F('field')]), self.objs[:4] ) def test_contained_by(self): self.assertSequenceEqual( NullableIntegerArrayModel.objects.filter(field__contained_by=[1, 2]), self.objs[:2] ) @unittest.expectedFailure def test_contained_by_including_F_object(self): # This test asserts that Array objects passed to filters can be # constructed to contain F objects. This currently doesn't work as the # psycopg2 mogrify method that generates the ARRAY() syntax is # expecting literals, not column references (#27095). self.assertSequenceEqual( NullableIntegerArrayModel.objects.filter(field__contained_by=[models.F('id'), 2]), self.objs[:2] ) def test_contains(self): self.assertSequenceEqual( NullableIntegerArrayModel.objects.filter(field__contains=[2]), self.objs[1:3] ) def test_icontains(self): # Using the __icontains lookup with ArrayField is inefficient. instance = CharArrayModel.objects.create(field=['FoO']) self.assertSequenceEqual( CharArrayModel.objects.filter(field__icontains='foo'), [instance] ) def test_contains_charfield(self): # Regression for #22907 self.assertSequenceEqual( CharArrayModel.objects.filter(field__contains=['text']), [] ) def test_contained_by_charfield(self): self.assertSequenceEqual( CharArrayModel.objects.filter(field__contained_by=['text']), [] ) def test_overlap_charfield(self): self.assertSequenceEqual( CharArrayModel.objects.filter(field__overlap=['text']), [] ) def test_index(self): self.assertSequenceEqual( NullableIntegerArrayModel.objects.filter(field__0=2), self.objs[1:3] ) def test_index_chained(self): self.assertSequenceEqual( NullableIntegerArrayModel.objects.filter(field__0__lt=3), self.objs[0:3] ) def test_index_nested(self): instance = NestedIntegerArrayModel.objects.create(field=[[1, 2], [3, 4]]) self.assertSequenceEqual( NestedIntegerArrayModel.objects.filter(field__0__0=1), [instance] ) @unittest.expectedFailure def test_index_used_on_nested_data(self): instance = NestedIntegerArrayModel.objects.create(field=[[1, 2], [3, 4]]) self.assertSequenceEqual( NestedIntegerArrayModel.objects.filter(field__0=[1, 2]), [instance] ) def test_index_transform_expression(self): expr = RawSQL("string_to_array(%s, ';')", ['1;2']) self.assertSequenceEqual( NullableIntegerArrayModel.objects.filter( field__0=Cast( IndexTransform(1, models.IntegerField, expr), output_field=models.IntegerField(), ), ), self.objs[:1], ) def test_overlap(self): self.assertSequenceEqual( NullableIntegerArrayModel.objects.filter(field__overlap=[1, 2]), self.objs[0:3] ) def test_len(self): self.assertSequenceEqual( NullableIntegerArrayModel.objects.filter(field__len__lte=2), self.objs[0:3] ) def test_len_empty_array(self): obj = NullableIntegerArrayModel.objects.create(field=[]) self.assertSequenceEqual( NullableIntegerArrayModel.objects.filter(field__len=0), [obj] ) def test_slice(self): self.assertSequenceEqual( NullableIntegerArrayModel.objects.filter(field__0_1=[2]), self.objs[1:3] ) self.assertSequenceEqual( NullableIntegerArrayModel.objects.filter(field__0_2=[2, 3]), self.objs[2:3] ) def test_order_by_slice(self): more_objs = ( NullableIntegerArrayModel.objects.create(field=[1, 637]), NullableIntegerArrayModel.objects.create(field=[2, 1]), NullableIntegerArrayModel.objects.create(field=[3, -98123]), NullableIntegerArrayModel.objects.create(field=[4, 2]), ) self.assertSequenceEqual( 
NullableIntegerArrayModel.objects.order_by('field__1'), [ more_objs[2], more_objs[1], more_objs[3], self.objs[2], self.objs[3], more_objs[0], self.objs[4], self.objs[1], self.objs[0], ] ) @unittest.expectedFailure def test_slice_nested(self): instance = NestedIntegerArrayModel.objects.create(field=[[1, 2], [3, 4]]) self.assertSequenceEqual( NestedIntegerArrayModel.objects.filter(field__0__0_1=[1]), [instance] ) def test_slice_transform_expression(self): expr = RawSQL("string_to_array(%s, ';')", ['9;2;3']) self.assertSequenceEqual( NullableIntegerArrayModel.objects.filter(field__0_2=SliceTransform(2, 3, expr)), self.objs[2:3], ) def test_usage_in_subquery(self): self.assertSequenceEqual( NullableIntegerArrayModel.objects.filter( id__in=NullableIntegerArrayModel.objects.filter(field__len=3) ), [self.objs[3]] ) def test_enum_lookup(self): class TestEnum(enum.Enum): VALUE_1 = 'value_1' instance = ArrayEnumModel.objects.create(array_of_enums=[TestEnum.VALUE_1]) self.assertSequenceEqual( ArrayEnumModel.objects.filter(array_of_enums__contains=[TestEnum.VALUE_1]), [instance] ) def test_unsupported_lookup(self): msg = "Unsupported lookup '0_bar' for ArrayField or join on the field not permitted." with self.assertRaisesMessage(FieldError, msg): list(NullableIntegerArrayModel.objects.filter(field__0_bar=[2])) msg = "Unsupported lookup '0bar' for ArrayField or join on the field not permitted." with self.assertRaisesMessage(FieldError, msg): list(NullableIntegerArrayModel.objects.filter(field__0bar=[2])) def test_grouping_by_annotations_with_array_field_param(self): value = models.Value([1], output_field=ArrayField(models.IntegerField())) self.assertEqual( NullableIntegerArrayModel.objects.annotate( array_length=models.Func(value, 1, function='ARRAY_LENGTH'), ).values('array_length').annotate( count=models.Count('pk'), ).get()['array_length'], 1, ) class TestDateTimeExactQuerying(PostgreSQLTestCase): @classmethod def setUpTestData(cls): now = timezone.now() cls.datetimes = [now] cls.dates = [now.date()] cls.times = [now.time()] cls.objs = [ DateTimeArrayModel.objects.create(datetimes=cls.datetimes, dates=cls.dates, times=cls.times), ] def test_exact_datetimes(self): self.assertSequenceEqual( DateTimeArrayModel.objects.filter(datetimes=self.datetimes), self.objs ) def test_exact_dates(self): self.assertSequenceEqual( DateTimeArrayModel.objects.filter(dates=self.dates), self.objs ) def test_exact_times(self): self.assertSequenceEqual( DateTimeArrayModel.objects.filter(times=self.times), self.objs ) class TestOtherTypesExactQuerying(PostgreSQLTestCase): @classmethod def setUpTestData(cls): cls.ips = ['192.168.0.1', '::1'] cls.uuids = [uuid.uuid4()] cls.decimals = [decimal.Decimal(1.25), 1.75] cls.tags = [Tag(1), Tag(2), Tag(3)] cls.objs = [ OtherTypesArrayModel.objects.create( ips=cls.ips, uuids=cls.uuids, decimals=cls.decimals, tags=cls.tags, ) ] def test_exact_ip_addresses(self): self.assertSequenceEqual( OtherTypesArrayModel.objects.filter(ips=self.ips), self.objs ) def test_exact_uuids(self): self.assertSequenceEqual( OtherTypesArrayModel.objects.filter(uuids=self.uuids), self.objs ) def test_exact_decimals(self): self.assertSequenceEqual( OtherTypesArrayModel.objects.filter(decimals=self.decimals), self.objs ) def test_exact_tags(self): self.assertSequenceEqual( OtherTypesArrayModel.objects.filter(tags=self.tags), self.objs ) @isolate_apps('postgres_tests') class TestChecks(PostgreSQLSimpleTestCase): def test_field_checks(self): class MyModel(PostgreSQLModel): field = ArrayField(models.CharField()) 
model = MyModel() errors = model.check() self.assertEqual(len(errors), 1) # The inner CharField is missing a max_length. self.assertEqual(errors[0].id, 'postgres.E001') self.assertIn('max_length', errors[0].msg) def test_invalid_base_fields(self): class MyModel(PostgreSQLModel): field = ArrayField(models.ManyToManyField('postgres_tests.IntegerArrayModel')) model = MyModel() errors = model.check() self.assertEqual(len(errors), 1) self.assertEqual(errors[0].id, 'postgres.E002') def test_invalid_default(self): class MyModel(PostgreSQLModel): field = ArrayField(models.IntegerField(), default=[]) model = MyModel() self.assertEqual(model.check(), [ checks.Warning( msg=( "ArrayField default should be a callable instead of an " "instance so that it's not shared between all field " "instances." ), hint='Use a callable instead, e.g., use `list` instead of `[]`.', obj=MyModel._meta.get_field('field'), id='postgres.E003', ) ]) def test_valid_default(self): class MyModel(PostgreSQLModel): field = ArrayField(models.IntegerField(), default=list) model = MyModel() self.assertEqual(model.check(), []) def test_valid_default_none(self): class MyModel(PostgreSQLModel): field = ArrayField(models.IntegerField(), default=None) model = MyModel() self.assertEqual(model.check(), []) def test_nested_field_checks(self): """ Nested ArrayFields are permitted. """ class MyModel(PostgreSQLModel): field = ArrayField(ArrayField(models.CharField())) model = MyModel() errors = model.check() self.assertEqual(len(errors), 1) # The inner CharField is missing a max_length. self.assertEqual(errors[0].id, 'postgres.E001') self.assertIn('max_length', errors[0].msg) @unittest.skipUnless(connection.vendor == 'postgresql', "PostgreSQL specific tests") class TestMigrations(TransactionTestCase): available_apps = ['postgres_tests'] def test_deconstruct(self): field = ArrayField(models.IntegerField()) name, path, args, kwargs = field.deconstruct() new = ArrayField(*args, **kwargs) self.assertEqual(type(new.base_field), type(field.base_field)) self.assertIsNot(new.base_field, field.base_field) def test_deconstruct_with_size(self): field = ArrayField(models.IntegerField(), size=3) name, path, args, kwargs = field.deconstruct() new = ArrayField(*args, **kwargs) self.assertEqual(new.size, field.size) def test_deconstruct_args(self): field = ArrayField(models.CharField(max_length=20)) name, path, args, kwargs = field.deconstruct() new = ArrayField(*args, **kwargs) self.assertEqual(new.base_field.max_length, field.base_field.max_length) def test_subclass_deconstruct(self): field = ArrayField(models.IntegerField()) name, path, args, kwargs = field.deconstruct() self.assertEqual(path, 'django.contrib.postgres.fields.ArrayField') field = ArrayFieldSubclass() name, path, args, kwargs = field.deconstruct() self.assertEqual(path, 'postgres_tests.models.ArrayFieldSubclass') @override_settings(MIGRATION_MODULES={ "postgres_tests": "postgres_tests.array_default_migrations", }) def test_adding_field_with_default(self): # See #22962 table_name = 'postgres_tests_integerarraydefaultmodel' with connection.cursor() as cursor: self.assertNotIn(table_name, connection.introspection.table_names(cursor)) call_command('migrate', 'postgres_tests', verbosity=0) with connection.cursor() as cursor: self.assertIn(table_name, connection.introspection.table_names(cursor)) call_command('migrate', 'postgres_tests', 'zero', verbosity=0) with connection.cursor() as cursor: self.assertNotIn(table_name, connection.introspection.table_names(cursor)) 
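    # Editor's note (illustrative, not from the original suite): the deconstruct()
    # assertions above rely on Field.deconstruct() returning the 4-tuple
    # (name, path, args, kwargs) that the migration writer serializes, e.g.:
    #
    #   name, path, args, kwargs = ArrayField(models.IntegerField(), size=3).deconstruct()
    #   # path == 'django.contrib.postgres.fields.ArrayField'
    #   # kwargs contains 'base_field' (a clone of the inner field) and 'size' == 3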
@override_settings(MIGRATION_MODULES={ "postgres_tests": "postgres_tests.array_index_migrations", }) def test_adding_arrayfield_with_index(self): """ ArrayField shouldn't have varchar_patterns_ops or text_patterns_ops indexes. """ table_name = 'postgres_tests_chartextarrayindexmodel' call_command('migrate', 'postgres_tests', verbosity=0) with connection.cursor() as cursor: like_constraint_columns_list = [ v['columns'] for k, v in list(connection.introspection.get_constraints(cursor, table_name).items()) if k.endswith('_like') ] # Only the CharField should have a LIKE index. self.assertEqual(like_constraint_columns_list, [['char2']]) # All fields should have regular indexes. with connection.cursor() as cursor: indexes = [ c['columns'][0] for c in connection.introspection.get_constraints(cursor, table_name).values() if c['index'] and len(c['columns']) == 1 ] self.assertIn('char', indexes) self.assertIn('char2', indexes) self.assertIn('text', indexes) call_command('migrate', 'postgres_tests', 'zero', verbosity=0) with connection.cursor() as cursor: self.assertNotIn(table_name, connection.introspection.table_names(cursor)) class TestSerialization(PostgreSQLSimpleTestCase): test_data = ( '[{"fields": {"field": "[\\"1\\", \\"2\\", null]"}, "model": "postgres_tests.integerarraymodel", "pk": null}]' ) def test_dumping(self): instance = IntegerArrayModel(field=[1, 2, None]) data = serializers.serialize('json', [instance]) self.assertEqual(json.loads(data), json.loads(self.test_data)) def test_loading(self): instance = list(serializers.deserialize('json', self.test_data))[0].object self.assertEqual(instance.field, [1, 2, None]) class TestValidation(PostgreSQLSimpleTestCase): def test_unbounded(self): field = ArrayField(models.IntegerField()) with self.assertRaises(exceptions.ValidationError) as cm: field.clean([1, None], None) self.assertEqual(cm.exception.code, 'item_invalid') self.assertEqual( cm.exception.message % cm.exception.params, 'Item 2 in the array did not validate: This field cannot be null.' ) def test_blank_true(self): field = ArrayField(models.IntegerField(blank=True, null=True)) # This should not raise a validation error field.clean([1, None], None) def test_with_size(self): field = ArrayField(models.IntegerField(), size=3) field.clean([1, 2, 3], None) with self.assertRaises(exceptions.ValidationError) as cm: field.clean([1, 2, 3, 4], None) self.assertEqual(cm.exception.messages[0], 'List contains 4 items, it should contain no more than 3.') def test_nested_array_mismatch(self): field = ArrayField(ArrayField(models.IntegerField())) field.clean([[1, 2], [3, 4]], None) with self.assertRaises(exceptions.ValidationError) as cm: field.clean([[1, 2], [3, 4, 5]], None) self.assertEqual(cm.exception.code, 'nested_array_mismatch') self.assertEqual(cm.exception.messages[0], 'Nested arrays must have the same length.') def test_with_base_field_error_params(self): field = ArrayField(models.CharField(max_length=2)) with self.assertRaises(exceptions.ValidationError) as cm: field.clean(['abc'], None) self.assertEqual(len(cm.exception.error_list), 1) exception = cm.exception.error_list[0] self.assertEqual( exception.message, 'Item 1 in the array did not validate: Ensure this value has at most 2 characters (it has 3).' 
) self.assertEqual(exception.code, 'item_invalid') self.assertEqual(exception.params, {'nth': 1, 'value': 'abc', 'limit_value': 2, 'show_value': 3}) def test_with_validators(self): field = ArrayField(models.IntegerField(validators=[validators.MinValueValidator(1)])) field.clean([1, 2], None) with self.assertRaises(exceptions.ValidationError) as cm: field.clean([0], None) self.assertEqual(len(cm.exception.error_list), 1) exception = cm.exception.error_list[0] self.assertEqual( exception.message, 'Item 1 in the array did not validate: Ensure this value is greater than or equal to 1.' ) self.assertEqual(exception.code, 'item_invalid') self.assertEqual(exception.params, {'nth': 1, 'value': 0, 'limit_value': 1, 'show_value': 0}) class TestSimpleFormField(PostgreSQLSimpleTestCase): def test_valid(self): field = SimpleArrayField(forms.CharField()) value = field.clean('a,b,c') self.assertEqual(value, ['a', 'b', 'c']) def test_to_python_fail(self): field = SimpleArrayField(forms.IntegerField()) with self.assertRaises(exceptions.ValidationError) as cm: field.clean('a,b,9') self.assertEqual(cm.exception.messages[0], 'Item 1 in the array did not validate: Enter a whole number.') def test_validate_fail(self): field = SimpleArrayField(forms.CharField(required=True)) with self.assertRaises(exceptions.ValidationError) as cm: field.clean('a,b,') self.assertEqual(cm.exception.messages[0], 'Item 3 in the array did not validate: This field is required.') def test_validate_fail_base_field_error_params(self): field = SimpleArrayField(forms.CharField(max_length=2)) with self.assertRaises(exceptions.ValidationError) as cm: field.clean('abc,c,defg') errors = cm.exception.error_list self.assertEqual(len(errors), 2) first_error = errors[0] self.assertEqual( first_error.message, 'Item 1 in the array did not validate: Ensure this value has at most 2 characters (it has 3).' ) self.assertEqual(first_error.code, 'item_invalid') self.assertEqual(first_error.params, {'nth': 1, 'value': 'abc', 'limit_value': 2, 'show_value': 3}) second_error = errors[1] self.assertEqual( second_error.message, 'Item 3 in the array did not validate: Ensure this value has at most 2 characters (it has 4).' 
) self.assertEqual(second_error.code, 'item_invalid') self.assertEqual(second_error.params, {'nth': 3, 'value': 'defg', 'limit_value': 2, 'show_value': 4}) def test_validators_fail(self): field = SimpleArrayField(forms.RegexField('[a-e]{2}')) with self.assertRaises(exceptions.ValidationError) as cm: field.clean('a,bc,de') self.assertEqual(cm.exception.messages[0], 'Item 1 in the array did not validate: Enter a valid value.') def test_delimiter(self): field = SimpleArrayField(forms.CharField(), delimiter='|') value = field.clean('a|b|c') self.assertEqual(value, ['a', 'b', 'c']) def test_delimiter_with_nesting(self): field = SimpleArrayField(SimpleArrayField(forms.CharField()), delimiter='|') value = field.clean('a,b|c,d') self.assertEqual(value, [['a', 'b'], ['c', 'd']]) def test_prepare_value(self): field = SimpleArrayField(forms.CharField()) value = field.prepare_value(['a', 'b', 'c']) self.assertEqual(value, 'a,b,c') def test_max_length(self): field = SimpleArrayField(forms.CharField(), max_length=2) with self.assertRaises(exceptions.ValidationError) as cm: field.clean('a,b,c') self.assertEqual(cm.exception.messages[0], 'List contains 3 items, it should contain no more than 2.') def test_min_length(self): field = SimpleArrayField(forms.CharField(), min_length=4) with self.assertRaises(exceptions.ValidationError) as cm: field.clean('a,b,c') self.assertEqual(cm.exception.messages[0], 'List contains 3 items, it should contain no fewer than 4.') def test_required(self): field = SimpleArrayField(forms.CharField(), required=True) with self.assertRaises(exceptions.ValidationError) as cm: field.clean('') self.assertEqual(cm.exception.messages[0], 'This field is required.') def test_model_field_formfield(self): model_field = ArrayField(models.CharField(max_length=27)) form_field = model_field.formfield() self.assertIsInstance(form_field, SimpleArrayField) self.assertIsInstance(form_field.base_field, forms.CharField) self.assertEqual(form_field.base_field.max_length, 27) def test_model_field_formfield_size(self): model_field = ArrayField(models.CharField(max_length=27), size=4) form_field = model_field.formfield() self.assertIsInstance(form_field, SimpleArrayField) self.assertEqual(form_field.max_length, 4) def test_model_field_choices(self): model_field = ArrayField(models.IntegerField(choices=((1, 'A'), (2, 'B')))) form_field = model_field.formfield() self.assertEqual(form_field.clean('1,2'), [1, 2]) def test_already_converted_value(self): field = SimpleArrayField(forms.CharField()) vals = ['a', 'b', 'c'] self.assertEqual(field.clean(vals), vals) def test_has_changed(self): field = SimpleArrayField(forms.IntegerField()) self.assertIs(field.has_changed([1, 2], [1, 2]), False) self.assertIs(field.has_changed([1, 2], '1,2'), False) self.assertIs(field.has_changed([1, 2], '1,2,3'), True) self.assertIs(field.has_changed([1, 2], 'a,b'), True) def test_has_changed_empty(self): field = SimpleArrayField(forms.CharField()) self.assertIs(field.has_changed(None, None), False) self.assertIs(field.has_changed(None, ''), False) self.assertIs(field.has_changed(None, []), False) self.assertIs(field.has_changed([], None), False) self.assertIs(field.has_changed([], ''), False) class TestSplitFormField(PostgreSQLSimpleTestCase): def test_valid(self): class SplitForm(forms.Form): array = SplitArrayField(forms.CharField(), size=3) data = {'array_0': 'a', 'array_1': 'b', 'array_2': 'c'} form = SplitForm(data) self.assertTrue(form.is_valid()) self.assertEqual(form.cleaned_data, {'array': ['a', 'b', 'c']}) def 
test_required(self): class SplitForm(forms.Form): array = SplitArrayField(forms.CharField(), required=True, size=3) data = {'array_0': '', 'array_1': '', 'array_2': ''} form = SplitForm(data) self.assertFalse(form.is_valid()) self.assertEqual(form.errors, {'array': ['This field is required.']}) def test_remove_trailing_nulls(self): class SplitForm(forms.Form): array = SplitArrayField(forms.CharField(required=False), size=5, remove_trailing_nulls=True) data = {'array_0': 'a', 'array_1': '', 'array_2': 'b', 'array_3': '', 'array_4': ''} form = SplitForm(data) self.assertTrue(form.is_valid(), form.errors) self.assertEqual(form.cleaned_data, {'array': ['a', '', 'b']}) def test_remove_trailing_nulls_not_required(self): class SplitForm(forms.Form): array = SplitArrayField( forms.CharField(required=False), size=2, remove_trailing_nulls=True, required=False, ) data = {'array_0': '', 'array_1': ''} form = SplitForm(data) self.assertTrue(form.is_valid()) self.assertEqual(form.cleaned_data, {'array': []}) def test_required_field(self): class SplitForm(forms.Form): array = SplitArrayField(forms.CharField(), size=3) data = {'array_0': 'a', 'array_1': 'b', 'array_2': ''} form = SplitForm(data) self.assertFalse(form.is_valid()) self.assertEqual(form.errors, {'array': ['Item 3 in the array did not validate: This field is required.']}) def test_invalid_integer(self): msg = 'Item 2 in the array did not validate: Ensure this value is less than or equal to 100.' with self.assertRaisesMessage(exceptions.ValidationError, msg): SplitArrayField(forms.IntegerField(max_value=100), size=2).clean([0, 101]) # To locate the widget's template. @modify_settings(INSTALLED_APPS={'append': 'django.contrib.postgres'}) def test_rendering(self): class SplitForm(forms.Form): array = SplitArrayField(forms.CharField(), size=3) self.assertHTMLEqual(str(SplitForm()), ''' <tr> <th><label for="id_array_0">Array:</label></th> <td> <input id="id_array_0" name="array_0" type="text" required> <input id="id_array_1" name="array_1" type="text" required> <input id="id_array_2" name="array_2" type="text" required> </td> </tr> ''') def test_invalid_char_length(self): field = SplitArrayField(forms.CharField(max_length=2), size=3) with self.assertRaises(exceptions.ValidationError) as cm: field.clean(['abc', 'c', 'defg']) self.assertEqual(cm.exception.messages, [ 'Item 1 in the array did not validate: Ensure this value has at most 2 characters (it has 3).', 'Item 3 in the array did not validate: Ensure this value has at most 2 characters (it has 4).', ]) def test_splitarraywidget_value_omitted_from_data(self): class Form(forms.ModelForm): field = SplitArrayField(forms.IntegerField(), required=False, size=2) class Meta: model = IntegerArrayModel fields = ('field',) form = Form({'field_0': '1', 'field_1': '2'}) self.assertEqual(form.errors, {}) obj = form.save(commit=False) self.assertEqual(obj.field, [1, 2]) def test_splitarrayfield_has_changed(self): class Form(forms.ModelForm): field = SplitArrayField(forms.IntegerField(), required=False, size=2) class Meta: model = IntegerArrayModel fields = ('field',) obj = IntegerArrayModel(field=[1, 2]) form = Form({'field_0': '1', 'field_1': '2'}, instance=obj) self.assertFalse(form.has_changed()) class TestSplitFormWidget(PostgreSQLWidgetTestCase): def test_get_context(self): self.assertEqual( SplitArrayWidget(forms.TextInput(), size=2).get_context('name', ['val1', 'val2']), { 'widget': { 'name': 'name', 'is_hidden': False, 'required': False, 'value': "['val1', 'val2']", 'attrs': {}, 'template_name': 
'postgres/widgets/split_array.html', 'subwidgets': [ { 'name': 'name_0', 'is_hidden': False, 'required': False, 'value': 'val1', 'attrs': {}, 'template_name': 'django/forms/widgets/text.html', 'type': 'text', }, { 'name': 'name_1', 'is_hidden': False, 'required': False, 'value': 'val2', 'attrs': {}, 'template_name': 'django/forms/widgets/text.html', 'type': 'text', }, ] } } ) def test_render(self): self.check_html( SplitArrayWidget(forms.TextInput(), size=2), 'array', None, """ <input name="array_0" type="text"> <input name="array_1" type="text"> """ ) def test_render_attrs(self): self.check_html( SplitArrayWidget(forms.TextInput(), size=2), 'array', ['val1', 'val2'], attrs={'id': 'foo'}, html=( """ <input id="foo_0" name="array_0" type="text" value="val1"> <input id="foo_1" name="array_1" type="text" value="val2"> """ ) ) def test_value_omitted_from_data(self): widget = SplitArrayWidget(forms.TextInput(), size=2) self.assertIs(widget.value_omitted_from_data({}, {}, 'field'), True) self.assertIs(widget.value_omitted_from_data({'field_0': 'value'}, {}, 'field'), False) self.assertIs(widget.value_omitted_from_data({'field_1': 'value'}, {}, 'field'), False) self.assertIs(widget.value_omitted_from_data({'field_0': 'value', 'field_1': 'value'}, {}, 'field'), False)
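# Editor's reference (not part of the original tests): an approximate map from the
# ArrayField lookups exercised above to the PostgreSQL SQL they compile to. The ORM
# uses 0-based indices and translates them to PostgreSQL's 1-based subscripts.
#
#   field__contains=[2]        ->  "field" @> ARRAY[2]
#   field__contained_by=[1,2]  ->  "field" <@ ARRAY[1, 2]
#   field__overlap=[1,2]       ->  "field" && ARRAY[1, 2]
#   field__0=2                 ->  "field"[1] = 2
#   field__0_2=[2,3]           ->  "field"[1:2] = ARRAY[2, 3]
#   field__len=0               ->  roughly coalesce(array_length("field", 1), 0) = 0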
c906f184b68c7bde2c84c7b717336f51463e4e54676c01378d4761f119f4476c
import datetime import json from decimal import Decimal from django import forms from django.core import exceptions, serializers from django.db.models import DateField, DateTimeField, F, Func, Value from django.test import ignore_warnings, override_settings from django.utils import timezone from django.utils.deprecation import RemovedInDjango31Warning from . import PostgreSQLSimpleTestCase, PostgreSQLTestCase from .models import RangeLookupsModel, RangesModel try: from psycopg2.extras import DateRange, DateTimeTZRange, NumericRange from django.contrib.postgres import fields as pg_fields, forms as pg_forms from django.contrib.postgres.validators import ( RangeMaxValueValidator, RangeMinValueValidator, ) except ImportError: pass class TestSaveLoad(PostgreSQLTestCase): def test_all_fields(self): now = timezone.now() instance = RangesModel( ints=NumericRange(0, 10), bigints=NumericRange(10, 20), decimals=NumericRange(20, 30), timestamps=DateTimeTZRange(now - datetime.timedelta(hours=1), now), dates=DateRange(now.date() - datetime.timedelta(days=1), now.date()), ) instance.save() loaded = RangesModel.objects.get() self.assertEqual(instance.ints, loaded.ints) self.assertEqual(instance.bigints, loaded.bigints) self.assertEqual(instance.decimals, loaded.decimals) self.assertEqual(instance.timestamps, loaded.timestamps) self.assertEqual(instance.dates, loaded.dates) def test_range_object(self): r = NumericRange(0, 10) instance = RangesModel(ints=r) instance.save() loaded = RangesModel.objects.get() self.assertEqual(r, loaded.ints) def test_tuple(self): instance = RangesModel(ints=(0, 10)) instance.save() loaded = RangesModel.objects.get() self.assertEqual(NumericRange(0, 10), loaded.ints) def test_range_object_boundaries(self): r = NumericRange(0, 10, '[]') instance = RangesModel(decimals=r) instance.save() loaded = RangesModel.objects.get() self.assertEqual(r, loaded.decimals) self.assertIn(10, loaded.decimals) def test_unbounded(self): r = NumericRange(None, None, '()') instance = RangesModel(decimals=r) instance.save() loaded = RangesModel.objects.get() self.assertEqual(r, loaded.decimals) def test_empty(self): r = NumericRange(empty=True) instance = RangesModel(ints=r) instance.save() loaded = RangesModel.objects.get() self.assertEqual(r, loaded.ints) def test_null(self): instance = RangesModel(ints=None) instance.save() loaded = RangesModel.objects.get() self.assertIsNone(loaded.ints) def test_model_set_on_base_field(self): instance = RangesModel() field = instance._meta.get_field('ints') self.assertEqual(field.model, RangesModel) self.assertEqual(field.base_field.model, RangesModel) class TestRangeContainsLookup(PostgreSQLTestCase): @classmethod def setUpTestData(cls): cls.timestamps = [ datetime.datetime(year=2016, month=1, day=1), datetime.datetime(year=2016, month=1, day=2, hour=1), datetime.datetime(year=2016, month=1, day=2, hour=12), datetime.datetime(year=2016, month=1, day=3), datetime.datetime(year=2016, month=1, day=3, hour=1), datetime.datetime(year=2016, month=2, day=2), ] cls.aware_timestamps = [ timezone.make_aware(timestamp) for timestamp in cls.timestamps ] cls.dates = [ datetime.date(year=2016, month=1, day=1), datetime.date(year=2016, month=1, day=2), datetime.date(year=2016, month=1, day=3), datetime.date(year=2016, month=1, day=4), datetime.date(year=2016, month=2, day=2), datetime.date(year=2016, month=2, day=3), ] cls.obj = RangesModel.objects.create( dates=(cls.dates[0], cls.dates[3]), dates_inner=(cls.dates[1], cls.dates[2]), timestamps=(cls.timestamps[0], 
cls.timestamps[3]), timestamps_inner=(cls.timestamps[1], cls.timestamps[2]), ) cls.aware_obj = RangesModel.objects.create( dates=(cls.dates[0], cls.dates[3]), dates_inner=(cls.dates[1], cls.dates[2]), timestamps=(cls.aware_timestamps[0], cls.aware_timestamps[3]), timestamps_inner=(cls.timestamps[1], cls.timestamps[2]), ) # Objects that don't match any queries. for i in range(3, 4): RangesModel.objects.create( dates=(cls.dates[i], cls.dates[i + 1]), timestamps=(cls.timestamps[i], cls.timestamps[i + 1]), ) RangesModel.objects.create( dates=(cls.dates[i], cls.dates[i + 1]), timestamps=(cls.aware_timestamps[i], cls.aware_timestamps[i + 1]), ) def test_datetime_range_contains(self): filter_args = ( self.timestamps[1], self.aware_timestamps[1], (self.timestamps[1], self.timestamps[2]), (self.aware_timestamps[1], self.aware_timestamps[2]), Value(self.dates[0], output_field=DateTimeField()), Func(F('dates'), function='lower', output_field=DateTimeField()), F('timestamps_inner'), ) for filter_arg in filter_args: with self.subTest(filter_arg=filter_arg): self.assertCountEqual( RangesModel.objects.filter(**{'timestamps__contains': filter_arg}), [self.obj, self.aware_obj], ) def test_date_range_contains(self): filter_args = ( self.timestamps[1], (self.dates[1], self.dates[2]), Value(self.dates[0], output_field=DateField()), Func(F('timestamps'), function='lower', output_field=DateField()), F('dates_inner'), ) for filter_arg in filter_args: with self.subTest(filter_arg=filter_arg): self.assertCountEqual( RangesModel.objects.filter(**{'dates__contains': filter_arg}), [self.obj, self.aware_obj], ) class TestQuerying(PostgreSQLTestCase): @classmethod def setUpTestData(cls): cls.objs = RangesModel.objects.bulk_create([ RangesModel(ints=NumericRange(0, 10)), RangesModel(ints=NumericRange(5, 15)), RangesModel(ints=NumericRange(None, 0)), RangesModel(ints=NumericRange(empty=True)), RangesModel(ints=None), ]) def test_exact(self): self.assertSequenceEqual( RangesModel.objects.filter(ints__exact=NumericRange(0, 10)), [self.objs[0]], ) def test_isnull(self): self.assertSequenceEqual( RangesModel.objects.filter(ints__isnull=True), [self.objs[4]], ) def test_isempty(self): self.assertSequenceEqual( RangesModel.objects.filter(ints__isempty=True), [self.objs[3]], ) def test_contains(self): self.assertSequenceEqual( RangesModel.objects.filter(ints__contains=8), [self.objs[0], self.objs[1]], ) def test_contains_range(self): self.assertSequenceEqual( RangesModel.objects.filter(ints__contains=NumericRange(3, 8)), [self.objs[0]], ) def test_contained_by(self): self.assertSequenceEqual( RangesModel.objects.filter(ints__contained_by=NumericRange(0, 20)), [self.objs[0], self.objs[1], self.objs[3]], ) def test_overlap(self): self.assertSequenceEqual( RangesModel.objects.filter(ints__overlap=NumericRange(3, 8)), [self.objs[0], self.objs[1]], ) def test_fully_lt(self): self.assertSequenceEqual( RangesModel.objects.filter(ints__fully_lt=NumericRange(5, 10)), [self.objs[2]], ) def test_fully_gt(self): self.assertSequenceEqual( RangesModel.objects.filter(ints__fully_gt=NumericRange(5, 10)), [], ) def test_not_lt(self): self.assertSequenceEqual( RangesModel.objects.filter(ints__not_lt=NumericRange(5, 10)), [self.objs[1]], ) def test_not_gt(self): self.assertSequenceEqual( RangesModel.objects.filter(ints__not_gt=NumericRange(5, 10)), [self.objs[0], self.objs[2]], ) def test_adjacent_to(self): self.assertSequenceEqual( RangesModel.objects.filter(ints__adjacent_to=NumericRange(0, 5)), [self.objs[1], self.objs[2]], ) def 
test_startswith(self): self.assertSequenceEqual( RangesModel.objects.filter(ints__startswith=0), [self.objs[0]], ) def test_endswith(self): self.assertSequenceEqual( RangesModel.objects.filter(ints__endswith=0), [self.objs[2]], ) def test_startswith_chaining(self): self.assertSequenceEqual( RangesModel.objects.filter(ints__startswith__gte=0), [self.objs[0], self.objs[1]], ) class TestQueryingWithRanges(PostgreSQLTestCase): def test_date_range(self): objs = [ RangeLookupsModel.objects.create(date='2015-01-01'), RangeLookupsModel.objects.create(date='2015-05-05'), ] self.assertSequenceEqual( RangeLookupsModel.objects.filter(date__contained_by=DateRange('2015-01-01', '2015-05-04')), [objs[0]], ) def test_date_range_datetime_field(self): objs = [ RangeLookupsModel.objects.create(timestamp='2015-01-01'), RangeLookupsModel.objects.create(timestamp='2015-05-05'), ] self.assertSequenceEqual( RangeLookupsModel.objects.filter(timestamp__date__contained_by=DateRange('2015-01-01', '2015-05-04')), [objs[0]], ) def test_datetime_range(self): objs = [ RangeLookupsModel.objects.create(timestamp='2015-01-01T09:00:00'), RangeLookupsModel.objects.create(timestamp='2015-05-05T17:00:00'), ] self.assertSequenceEqual( RangeLookupsModel.objects.filter( timestamp__contained_by=DateTimeTZRange('2015-01-01T09:00', '2015-05-04T23:55') ), [objs[0]], ) def test_integer_range(self): objs = [ RangeLookupsModel.objects.create(integer=5), RangeLookupsModel.objects.create(integer=99), RangeLookupsModel.objects.create(integer=-1), ] self.assertSequenceEqual( RangeLookupsModel.objects.filter(integer__contained_by=NumericRange(1, 98)), [objs[0]] ) def test_biginteger_range(self): objs = [ RangeLookupsModel.objects.create(big_integer=5), RangeLookupsModel.objects.create(big_integer=99), RangeLookupsModel.objects.create(big_integer=-1), ] self.assertSequenceEqual( RangeLookupsModel.objects.filter(big_integer__contained_by=NumericRange(1, 98)), [objs[0]] ) def test_float_range(self): objs = [ RangeLookupsModel.objects.create(float=5), RangeLookupsModel.objects.create(float=99), RangeLookupsModel.objects.create(float=-1), ] self.assertSequenceEqual( RangeLookupsModel.objects.filter(float__contained_by=NumericRange(1, 98)), [objs[0]] ) def test_f_ranges(self): parent = RangesModel.objects.create(decimals=NumericRange(0, 10)) objs = [ RangeLookupsModel.objects.create(float=5, parent=parent), RangeLookupsModel.objects.create(float=99, parent=parent), ] self.assertSequenceEqual( RangeLookupsModel.objects.filter(float__contained_by=F('parent__decimals')), [objs[0]] ) def test_exclude(self): objs = [ RangeLookupsModel.objects.create(float=5), RangeLookupsModel.objects.create(float=99), RangeLookupsModel.objects.create(float=-1), ] self.assertSequenceEqual( RangeLookupsModel.objects.exclude(float__contained_by=NumericRange(0, 100)), [objs[2]] ) class TestSerialization(PostgreSQLSimpleTestCase): test_data = ( '[{"fields": {"ints": "{\\"upper\\": \\"10\\", \\"lower\\": \\"0\\", ' '\\"bounds\\": \\"[)\\"}", "decimals": "{\\"empty\\": true}", ' '"bigints": null, "timestamps": "{\\"upper\\": \\"2014-02-02T12:12:12+00:00\\", ' '\\"lower\\": \\"2014-01-01T00:00:00+00:00\\", \\"bounds\\": \\"[)\\"}", ' '"timestamps_inner": null, ' '"dates": "{\\"upper\\": \\"2014-02-02\\", \\"lower\\": \\"2014-01-01\\", \\"bounds\\": \\"[)\\"}", ' '"dates_inner": null }, ' '"model": "postgres_tests.rangesmodel", "pk": null}]' ) lower_date = datetime.date(2014, 1, 1) upper_date = datetime.date(2014, 2, 2) lower_dt = datetime.datetime(2014, 1, 1, 0, 0, 0, 
tzinfo=timezone.utc) upper_dt = datetime.datetime(2014, 2, 2, 12, 12, 12, tzinfo=timezone.utc) def test_dumping(self): instance = RangesModel( ints=NumericRange(0, 10), decimals=NumericRange(empty=True), timestamps=DateTimeTZRange(self.lower_dt, self.upper_dt), dates=DateRange(self.lower_date, self.upper_date), ) data = serializers.serialize('json', [instance]) dumped = json.loads(data) for field in ('ints', 'dates', 'timestamps'): dumped[0]['fields'][field] = json.loads(dumped[0]['fields'][field]) check = json.loads(self.test_data) for field in ('ints', 'dates', 'timestamps'): check[0]['fields'][field] = json.loads(check[0]['fields'][field]) self.assertEqual(dumped, check) def test_loading(self): instance = list(serializers.deserialize('json', self.test_data))[0].object self.assertEqual(instance.ints, NumericRange(0, 10)) self.assertEqual(instance.decimals, NumericRange(empty=True)) self.assertIsNone(instance.bigints) self.assertEqual(instance.dates, DateRange(self.lower_date, self.upper_date)) self.assertEqual(instance.timestamps, DateTimeTZRange(self.lower_dt, self.upper_dt)) def test_serialize_range_with_null(self): instance = RangesModel(ints=NumericRange(None, 10)) data = serializers.serialize('json', [instance]) new_instance = list(serializers.deserialize('json', data))[0].object self.assertEqual(new_instance.ints, NumericRange(None, 10)) instance = RangesModel(ints=NumericRange(10, None)) data = serializers.serialize('json', [instance]) new_instance = list(serializers.deserialize('json', data))[0].object self.assertEqual(new_instance.ints, NumericRange(10, None)) class TestValidators(PostgreSQLSimpleTestCase): def test_max(self): validator = RangeMaxValueValidator(5) validator(NumericRange(0, 5)) msg = 'Ensure that this range is completely less than or equal to 5.' with self.assertRaises(exceptions.ValidationError) as cm: validator(NumericRange(0, 10)) self.assertEqual(cm.exception.messages[0], msg) self.assertEqual(cm.exception.code, 'max_value') with self.assertRaisesMessage(exceptions.ValidationError, msg): validator(NumericRange(0, None)) # an unbound range def test_min(self): validator = RangeMinValueValidator(5) validator(NumericRange(10, 15)) msg = 'Ensure that this range is completely greater than or equal to 5.' with self.assertRaises(exceptions.ValidationError) as cm: validator(NumericRange(0, 10)) self.assertEqual(cm.exception.messages[0], msg) self.assertEqual(cm.exception.code, 'min_value') with self.assertRaisesMessage(exceptions.ValidationError, msg): validator(NumericRange(None, 10)) # an unbound range class TestFormField(PostgreSQLSimpleTestCase): def test_valid_integer(self): field = pg_forms.IntegerRangeField() value = field.clean(['1', '2']) self.assertEqual(value, NumericRange(1, 2)) @ignore_warnings(category=RemovedInDjango31Warning) def test_valid_floats(self): field = pg_forms.FloatRangeField() value = field.clean(['1.12345', '2.001']) self.assertEqual(value, NumericRange(1.12345, 2.001)) def test_valid_decimal(self): field = pg_forms.DecimalRangeField() value = field.clean(['1.12345', '2.001']) self.assertEqual(value, NumericRange(Decimal('1.12345'), Decimal('2.001'))) def test_float_range_field_deprecation(self): msg = 'FloatRangeField is deprecated in favor of DecimalRangeField.' 
with self.assertRaisesMessage(RemovedInDjango31Warning, msg): pg_forms.FloatRangeField() def test_valid_timestamps(self): field = pg_forms.DateTimeRangeField() value = field.clean(['01/01/2014 00:00:00', '02/02/2014 12:12:12']) lower = datetime.datetime(2014, 1, 1, 0, 0, 0) upper = datetime.datetime(2014, 2, 2, 12, 12, 12) self.assertEqual(value, DateTimeTZRange(lower, upper)) def test_valid_dates(self): field = pg_forms.DateRangeField() value = field.clean(['01/01/2014', '02/02/2014']) lower = datetime.date(2014, 1, 1) upper = datetime.date(2014, 2, 2) self.assertEqual(value, DateRange(lower, upper)) def test_using_split_datetime_widget(self): class SplitDateTimeRangeField(pg_forms.DateTimeRangeField): base_field = forms.SplitDateTimeField class SplitForm(forms.Form): field = SplitDateTimeRangeField() form = SplitForm() self.assertHTMLEqual(str(form), ''' <tr> <th> <label for="id_field_0">Field:</label> </th> <td> <input id="id_field_0_0" name="field_0_0" type="text"> <input id="id_field_0_1" name="field_0_1" type="text"> <input id="id_field_1_0" name="field_1_0" type="text"> <input id="id_field_1_1" name="field_1_1" type="text"> </td> </tr> ''') form = SplitForm({ 'field_0_0': '01/01/2014', 'field_0_1': '00:00:00', 'field_1_0': '02/02/2014', 'field_1_1': '12:12:12', }) self.assertTrue(form.is_valid()) lower = datetime.datetime(2014, 1, 1, 0, 0, 0) upper = datetime.datetime(2014, 2, 2, 12, 12, 12) self.assertEqual(form.cleaned_data['field'], DateTimeTZRange(lower, upper)) def test_none(self): field = pg_forms.IntegerRangeField(required=False) value = field.clean(['', '']) self.assertIsNone(value) def test_rendering(self): class RangeForm(forms.Form): ints = pg_forms.IntegerRangeField() self.assertHTMLEqual(str(RangeForm()), ''' <tr> <th><label for="id_ints_0">Ints:</label></th> <td> <input id="id_ints_0" name="ints_0" type="number"> <input id="id_ints_1" name="ints_1" type="number"> </td> </tr> ''') def test_integer_lower_bound_higher(self): field = pg_forms.IntegerRangeField() with self.assertRaises(exceptions.ValidationError) as cm: field.clean(['10', '2']) self.assertEqual(cm.exception.messages[0], 'The start of the range must not exceed the end of the range.') self.assertEqual(cm.exception.code, 'bound_ordering') def test_integer_open(self): field = pg_forms.IntegerRangeField() value = field.clean(['', '0']) self.assertEqual(value, NumericRange(None, 0)) def test_integer_incorrect_data_type(self): field = pg_forms.IntegerRangeField() with self.assertRaises(exceptions.ValidationError) as cm: field.clean('1') self.assertEqual(cm.exception.messages[0], 'Enter two whole numbers.') self.assertEqual(cm.exception.code, 'invalid') def test_integer_invalid_lower(self): field = pg_forms.IntegerRangeField() with self.assertRaises(exceptions.ValidationError) as cm: field.clean(['a', '2']) self.assertEqual(cm.exception.messages[0], 'Enter a whole number.') def test_integer_invalid_upper(self): field = pg_forms.IntegerRangeField() with self.assertRaises(exceptions.ValidationError) as cm: field.clean(['1', 'b']) self.assertEqual(cm.exception.messages[0], 'Enter a whole number.') def test_integer_required(self): field = pg_forms.IntegerRangeField(required=True) with self.assertRaises(exceptions.ValidationError) as cm: field.clean(['', '']) self.assertEqual(cm.exception.messages[0], 'This field is required.') value = field.clean([1, '']) self.assertEqual(value, NumericRange(1, None)) def test_decimal_lower_bound_higher(self): field = pg_forms.DecimalRangeField() with 
self.assertRaises(exceptions.ValidationError) as cm: field.clean(['1.8', '1.6']) self.assertEqual(cm.exception.messages[0], 'The start of the range must not exceed the end of the range.') self.assertEqual(cm.exception.code, 'bound_ordering') def test_decimal_open(self): field = pg_forms.DecimalRangeField() value = field.clean(['', '3.1415926']) self.assertEqual(value, NumericRange(None, Decimal('3.1415926'))) def test_decimal_incorrect_data_type(self): field = pg_forms.DecimalRangeField() with self.assertRaises(exceptions.ValidationError) as cm: field.clean('1.6') self.assertEqual(cm.exception.messages[0], 'Enter two numbers.') self.assertEqual(cm.exception.code, 'invalid') def test_decimal_invalid_lower(self): field = pg_forms.DecimalRangeField() with self.assertRaises(exceptions.ValidationError) as cm: field.clean(['a', '3.1415926']) self.assertEqual(cm.exception.messages[0], 'Enter a number.') def test_decimal_invalid_upper(self): field = pg_forms.DecimalRangeField() with self.assertRaises(exceptions.ValidationError) as cm: field.clean(['1.61803399', 'b']) self.assertEqual(cm.exception.messages[0], 'Enter a number.') def test_decimal_required(self): field = pg_forms.DecimalRangeField(required=True) with self.assertRaises(exceptions.ValidationError) as cm: field.clean(['', '']) self.assertEqual(cm.exception.messages[0], 'This field is required.') value = field.clean(['1.61803399', '']) self.assertEqual(value, NumericRange(Decimal('1.61803399'), None)) def test_date_lower_bound_higher(self): field = pg_forms.DateRangeField() with self.assertRaises(exceptions.ValidationError) as cm: field.clean(['2013-04-09', '1976-04-16']) self.assertEqual(cm.exception.messages[0], 'The start of the range must not exceed the end of the range.') self.assertEqual(cm.exception.code, 'bound_ordering') def test_date_open(self): field = pg_forms.DateRangeField() value = field.clean(['', '2013-04-09']) self.assertEqual(value, DateRange(None, datetime.date(2013, 4, 9))) def test_date_incorrect_data_type(self): field = pg_forms.DateRangeField() with self.assertRaises(exceptions.ValidationError) as cm: field.clean('1') self.assertEqual(cm.exception.messages[0], 'Enter two valid dates.') self.assertEqual(cm.exception.code, 'invalid') def test_date_invalid_lower(self): field = pg_forms.DateRangeField() with self.assertRaises(exceptions.ValidationError) as cm: field.clean(['a', '2013-04-09']) self.assertEqual(cm.exception.messages[0], 'Enter a valid date.') def test_date_invalid_upper(self): field = pg_forms.DateRangeField() with self.assertRaises(exceptions.ValidationError) as cm: field.clean(['2013-04-09', 'b']) self.assertEqual(cm.exception.messages[0], 'Enter a valid date.') def test_date_required(self): field = pg_forms.DateRangeField(required=True) with self.assertRaises(exceptions.ValidationError) as cm: field.clean(['', '']) self.assertEqual(cm.exception.messages[0], 'This field is required.') value = field.clean(['1976-04-16', '']) self.assertEqual(value, DateRange(datetime.date(1976, 4, 16), None)) def test_datetime_lower_bound_higher(self): field = pg_forms.DateTimeRangeField() with self.assertRaises(exceptions.ValidationError) as cm: field.clean(['2006-10-25 14:59', '2006-10-25 14:58']) self.assertEqual(cm.exception.messages[0], 'The start of the range must not exceed the end of the range.') self.assertEqual(cm.exception.code, 'bound_ordering') def test_datetime_open(self): field = pg_forms.DateTimeRangeField() value = field.clean(['', '2013-04-09 11:45']) self.assertEqual(value, DateTimeTZRange(None, 
datetime.datetime(2013, 4, 9, 11, 45))) def test_datetime_incorrect_data_type(self): field = pg_forms.DateTimeRangeField() with self.assertRaises(exceptions.ValidationError) as cm: field.clean('2013-04-09 11:45') self.assertEqual(cm.exception.messages[0], 'Enter two valid date/times.') self.assertEqual(cm.exception.code, 'invalid') def test_datetime_invalid_lower(self): field = pg_forms.DateTimeRangeField() with self.assertRaises(exceptions.ValidationError) as cm: field.clean(['45', '2013-04-09 11:45']) self.assertEqual(cm.exception.messages[0], 'Enter a valid date/time.') def test_datetime_invalid_upper(self): field = pg_forms.DateTimeRangeField() with self.assertRaises(exceptions.ValidationError) as cm: field.clean(['2013-04-09 11:45', 'sweet pickles']) self.assertEqual(cm.exception.messages[0], 'Enter a valid date/time.') def test_datetime_required(self): field = pg_forms.DateTimeRangeField(required=True) with self.assertRaises(exceptions.ValidationError) as cm: field.clean(['', '']) self.assertEqual(cm.exception.messages[0], 'This field is required.') value = field.clean(['2013-04-09 11:45', '']) self.assertEqual(value, DateTimeTZRange(datetime.datetime(2013, 4, 9, 11, 45), None)) @override_settings(USE_TZ=True, TIME_ZONE='Africa/Johannesburg') def test_datetime_prepare_value(self): field = pg_forms.DateTimeRangeField() value = field.prepare_value( DateTimeTZRange(datetime.datetime(2015, 5, 22, 16, 6, 33, tzinfo=timezone.utc), None) ) self.assertEqual(value, [datetime.datetime(2015, 5, 22, 18, 6, 33), None]) def test_model_field_formfield_integer(self): model_field = pg_fields.IntegerRangeField() form_field = model_field.formfield() self.assertIsInstance(form_field, pg_forms.IntegerRangeField) def test_model_field_formfield_biginteger(self): model_field = pg_fields.BigIntegerRangeField() form_field = model_field.formfield() self.assertIsInstance(form_field, pg_forms.IntegerRangeField) def test_model_field_formfield_float(self): model_field = pg_fields.DecimalRangeField() form_field = model_field.formfield() self.assertIsInstance(form_field, pg_forms.DecimalRangeField) def test_model_field_formfield_date(self): model_field = pg_fields.DateRangeField() form_field = model_field.formfield() self.assertIsInstance(form_field, pg_forms.DateRangeField) def test_model_field_formfield_datetime(self): model_field = pg_fields.DateTimeRangeField() form_field = model_field.formfield() self.assertIsInstance(form_field, pg_forms.DateTimeRangeField) class TestWidget(PostgreSQLSimpleTestCase): def test_range_widget(self): f = pg_forms.ranges.DateTimeRangeField() self.assertHTMLEqual( f.widget.render('datetimerange', ''), '<input type="text" name="datetimerange_0"><input type="text" name="datetimerange_1">' ) self.assertHTMLEqual( f.widget.render('datetimerange', None), '<input type="text" name="datetimerange_0"><input type="text" name="datetimerange_1">' ) dt_range = DateTimeTZRange( datetime.datetime(2006, 1, 10, 7, 30), datetime.datetime(2006, 2, 12, 9, 50) ) self.assertHTMLEqual( f.widget.render('datetimerange', dt_range), '<input type="text" name="datetimerange_0" value="2006-01-10 07:30:00">' '<input type="text" name="datetimerange_1" value="2006-02-12 09:50:00">' )
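# Editor's sketch (not part of the original suite): the psycopg2 range objects used
# throughout these tests can be exercised without a database. Bounds default to
# '[)' (inclusive lower, exclusive upper), matching PostgreSQL's canonical form.
# Paste into a standalone script; this module's relative imports prevent running
# it directly.
if __name__ == '__main__':
    from psycopg2.extras import NumericRange

    r = NumericRange(0, 10)  # default bounds '[)'
    assert 0 in r and 10 not in r
    assert 10 in NumericRange(0, 10, '[]')  # closed upper bound includes 10
    assert NumericRange(empty=True).isempty
    unbounded = NumericRange(None, None, '()')
    assert unbounded.lower is None and unbounded.upper is None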
import datetime
import operator
import uuid
from decimal import Decimal

from django.core import checks, exceptions, serializers
from django.core.serializers.json import DjangoJSONEncoder
from django.db import connection
from django.db.models import Count, F, Q
from django.db.models.expressions import RawSQL
from django.db.models.functions import Cast
from django.forms import CharField, Form, widgets
from django.test.utils import CaptureQueriesContext, isolate_apps
from django.utils.html import escape

from . import PostgreSQLSimpleTestCase, PostgreSQLTestCase
from .models import JSONModel, PostgreSQLModel

try:
    from django.contrib.postgres import forms
    from django.contrib.postgres.fields import JSONField
    from django.contrib.postgres.fields.jsonb import KeyTextTransform, KeyTransform
except ImportError:
    pass


class TestModelMetaOrdering(PostgreSQLSimpleTestCase):
    def test_ordering_by_json_field_value(self):
        class TestJSONModel(JSONModel):
            class Meta:
                ordering = ['field__value']

        self.assertEqual(TestJSONModel.check(), [])


class TestSaveLoad(PostgreSQLTestCase):
    def test_null(self):
        instance = JSONModel()
        instance.save()
        loaded = JSONModel.objects.get()
        self.assertIsNone(loaded.field)

    def test_empty_object(self):
        instance = JSONModel(field={})
        instance.save()
        loaded = JSONModel.objects.get()
        self.assertEqual(loaded.field, {})

    def test_empty_list(self):
        instance = JSONModel(field=[])
        instance.save()
        loaded = JSONModel.objects.get()
        self.assertEqual(loaded.field, [])

    def test_boolean(self):
        instance = JSONModel(field=True)
        instance.save()
        loaded = JSONModel.objects.get()
        self.assertIs(loaded.field, True)

    def test_string(self):
        instance = JSONModel(field='why?')
        instance.save()
        loaded = JSONModel.objects.get()
        self.assertEqual(loaded.field, 'why?')

    def test_number(self):
        instance = JSONModel(field=1)
        instance.save()
        loaded = JSONModel.objects.get()
        self.assertEqual(loaded.field, 1)

    def test_realistic_object(self):
        obj = {
            'a': 'b',
            'c': 1,
            'd': ['e', {'f': 'g'}],
            'h': True,
            'i': False,
            'j': None,
        }
        instance = JSONModel(field=obj)
        instance.save()
        loaded = JSONModel.objects.get()
        self.assertEqual(loaded.field, obj)

    def test_custom_encoding(self):
        """
        JSONModel.field_custom has a custom DjangoJSONEncoder.
        """
        some_uuid = uuid.uuid4()
        obj_before = {
            'date': datetime.date(2016, 8, 12),
            'datetime': datetime.datetime(2016, 8, 12, 13, 44, 47, 575981),
            'decimal': Decimal('10.54'),
            'uuid': some_uuid,
        }
        obj_after = {
            'date': '2016-08-12',
            'datetime': '2016-08-12T13:44:47.575',
            'decimal': '10.54',
            'uuid': str(some_uuid),
        }
        JSONModel.objects.create(field_custom=obj_before)
        loaded = JSONModel.objects.get()
        self.assertEqual(loaded.field_custom, obj_after)


class TestQuerying(PostgreSQLTestCase):
    @classmethod
    def setUpTestData(cls):
        cls.objs = JSONModel.objects.bulk_create([
            JSONModel(field=None),
            JSONModel(field=True),
            JSONModel(field=False),
            JSONModel(field='yes'),
            JSONModel(field=7),
            JSONModel(field=[]),
            JSONModel(field={}),
            JSONModel(field={
                'a': 'b',
                'c': 1,
            }),
            JSONModel(field={
                'a': 'b',
                'c': 1,
                'd': ['e', {'f': 'g'}],
                'h': True,
                'i': False,
                'j': None,
                'k': {'l': 'm'},
            }),
            JSONModel(field=[1, [2]]),
            JSONModel(field={
                'k': True,
                'l': False,
            }),
            JSONModel(field={'foo': 'bar'}),
        ])

    def test_exact(self):
        self.assertSequenceEqual(
            JSONModel.objects.filter(field__exact={}),
            [self.objs[6]]
        )

    def test_exact_complex(self):
        self.assertSequenceEqual(
            JSONModel.objects.filter(field__exact={'a': 'b', 'c': 1}),
            [self.objs[7]]
        )

    def test_isnull(self):
        self.assertSequenceEqual(
            JSONModel.objects.filter(field__isnull=True),
            [self.objs[0]]
        )

    def test_ordering_by_transform(self):
        objs = [
            JSONModel.objects.create(field={'ord': 93, 'name': 'bar'}),
            JSONModel.objects.create(field={'ord': 22.1, 'name': 'foo'}),
            JSONModel.objects.create(field={'ord': -1, 'name': 'baz'}),
            JSONModel.objects.create(field={'ord': 21.931902, 'name': 'spam'}),
            JSONModel.objects.create(field={'ord': -100291029, 'name': 'eggs'}),
        ]
        query = JSONModel.objects.filter(field__name__isnull=False).order_by('field__ord')
        self.assertSequenceEqual(query, [objs[4], objs[2], objs[3], objs[1], objs[0]])

    def test_ordering_grouping_by_key_transform(self):
        base_qs = JSONModel.objects.filter(field__d__0__isnull=False)
        for qs in (
            base_qs.order_by('field__d__0'),
            base_qs.annotate(key=KeyTransform('0', KeyTransform('d', 'field'))).order_by('key'),
        ):
            self.assertSequenceEqual(qs, [self.objs[8]])
        qs = JSONModel.objects.filter(field__isnull=False)
        self.assertQuerysetEqual(
            qs.values('field__d__0').annotate(count=Count('field__d__0')).order_by('count'),
            [1, 10],
            operator.itemgetter('count'),
        )
        self.assertQuerysetEqual(
            qs.filter(field__isnull=False).annotate(
                key=KeyTextTransform('f', KeyTransform('1', KeyTransform('d', 'field'))),
            ).values('key').annotate(count=Count('key')).order_by('count'),
            [(None, 0), ('g', 1)],
            operator.itemgetter('key', 'count'),
        )

    def test_key_transform_raw_expression(self):
        expr = RawSQL('%s::jsonb', ['{"x": "bar"}'])
        self.assertSequenceEqual(
            JSONModel.objects.filter(field__foo=KeyTransform('x', expr)),
            [self.objs[-1]],
        )

    def test_key_transform_expression(self):
        self.assertSequenceEqual(
            JSONModel.objects.filter(field__d__0__isnull=False).annotate(
                key=KeyTransform('d', 'field'),
                chain=KeyTransform('0', 'key'),
                expr=KeyTransform('0', Cast('key', JSONField())),
            ).filter(chain=F('expr')),
            [self.objs[8]],
        )

    def test_nested_key_transform_raw_expression(self):
        expr = RawSQL('%s::jsonb', ['{"x": {"y": "bar"}}'])
        self.assertSequenceEqual(
            JSONModel.objects.filter(field__foo=KeyTransform('y', KeyTransform('x', expr))),
            [self.objs[-1]],
        )

    def test_nested_key_transform_expression(self):
        self.assertSequenceEqual(
            JSONModel.objects.filter(field__d__0__isnull=False).annotate(
                key=KeyTransform('d', 'field'),
                chain=KeyTransform('f', KeyTransform('1', 'key')),
                expr=KeyTransform('f', KeyTransform('1', Cast('key', JSONField()))),
            ).filter(chain=F('expr')),
            [self.objs[8]],
        )

    def test_deep_values(self):
        query = JSONModel.objects.values_list('field__k__l')
        self.assertSequenceEqual(
            query,
            [
                (None,), (None,), (None,), (None,),
                (None,), (None,), (None,), (None,),
                ('m',), (None,), (None,), (None,),
            ]
        )

    def test_deep_distinct(self):
        query = JSONModel.objects.distinct('field__k__l').values_list('field__k__l')
        self.assertSequenceEqual(query, [('m',), (None,)])

    def test_isnull_key(self):
        # key__isnull works the same as has_key='key'.
        self.assertSequenceEqual(
            JSONModel.objects.filter(field__a__isnull=True),
            self.objs[:7] + self.objs[9:]
        )
        self.assertSequenceEqual(
            JSONModel.objects.filter(field__a__isnull=False),
            [self.objs[7], self.objs[8]]
        )

    def test_none_key(self):
        self.assertSequenceEqual(JSONModel.objects.filter(field__j=None), [self.objs[8]])

    def test_none_key_exclude(self):
        obj = JSONModel.objects.create(field={'j': 1})
        self.assertSequenceEqual(JSONModel.objects.exclude(field__j=None), [obj])

    def test_isnull_key_or_none(self):
        obj = JSONModel.objects.create(field={'a': None})
        self.assertSequenceEqual(
            JSONModel.objects.filter(Q(field__a__isnull=True) | Q(field__a=None)),
            self.objs[:7] + self.objs[9:] + [obj]
        )

    def test_contains(self):
        self.assertSequenceEqual(
            JSONModel.objects.filter(field__contains={'a': 'b'}),
            [self.objs[7], self.objs[8]]
        )

    def test_contained_by(self):
        self.assertSequenceEqual(
            JSONModel.objects.filter(field__contained_by={'a': 'b', 'c': 1, 'h': True}),
            [self.objs[6], self.objs[7]]
        )

    def test_has_key(self):
        self.assertSequenceEqual(
            JSONModel.objects.filter(field__has_key='a'),
            [self.objs[7], self.objs[8]]
        )

    def test_has_keys(self):
        self.assertSequenceEqual(
            JSONModel.objects.filter(field__has_keys=['a', 'c', 'h']),
            [self.objs[8]]
        )

    def test_has_any_keys(self):
        self.assertSequenceEqual(
            JSONModel.objects.filter(field__has_any_keys=['c', 'l']),
            [self.objs[7], self.objs[8], self.objs[10]]
        )

    def test_shallow_list_lookup(self):
        self.assertSequenceEqual(
            JSONModel.objects.filter(field__0=1),
            [self.objs[9]]
        )

    def test_shallow_obj_lookup(self):
        self.assertSequenceEqual(
            JSONModel.objects.filter(field__a='b'),
            [self.objs[7], self.objs[8]]
        )

    def test_deep_lookup_objs(self):
        self.assertSequenceEqual(
            JSONModel.objects.filter(field__k__l='m'),
            [self.objs[8]]
        )

    def test_shallow_lookup_obj_target(self):
        self.assertSequenceEqual(
            JSONModel.objects.filter(field__k={'l': 'm'}),
            [self.objs[8]]
        )

    def test_deep_lookup_array(self):
        self.assertSequenceEqual(
            JSONModel.objects.filter(field__1__0=2),
            [self.objs[9]]
        )

    def test_deep_lookup_mixed(self):
        self.assertSequenceEqual(
            JSONModel.objects.filter(field__d__1__f='g'),
            [self.objs[8]]
        )

    def test_deep_lookup_transform(self):
        self.assertSequenceEqual(
            JSONModel.objects.filter(field__c__gt=1),
            []
        )
        self.assertSequenceEqual(
            JSONModel.objects.filter(field__c__lt=5),
            [self.objs[7], self.objs[8]]
        )

    def test_usage_in_subquery(self):
        self.assertSequenceEqual(
            JSONModel.objects.filter(id__in=JSONModel.objects.filter(field__c=1)),
            self.objs[7:9]
        )

    def test_iexact(self):
        self.assertTrue(JSONModel.objects.filter(field__foo__iexact='BaR').exists())
        self.assertFalse(JSONModel.objects.filter(field__foo__iexact='"BaR"').exists())

    def test_icontains(self):
        self.assertFalse(JSONModel.objects.filter(field__foo__icontains='"bar"').exists())

    def test_startswith(self):
        self.assertTrue(JSONModel.objects.filter(field__foo__startswith='b').exists())

    def test_istartswith(self):
        self.assertTrue(JSONModel.objects.filter(field__foo__istartswith='B').exists())

    def test_endswith(self):
        self.assertTrue(JSONModel.objects.filter(field__foo__endswith='r').exists())

    def test_iendswith(self):
        self.assertTrue(JSONModel.objects.filter(field__foo__iendswith='R').exists())

    def test_regex(self):
        self.assertTrue(JSONModel.objects.filter(field__foo__regex=r'^bar$').exists())

    def test_iregex(self):
        self.assertTrue(JSONModel.objects.filter(field__foo__iregex=r'^bAr$').exists())

    def test_key_sql_injection(self):
        with CaptureQueriesContext(connection) as queries:
            self.assertFalse(
                JSONModel.objects.filter(**{
                    """field__test' = '"a"') OR 1 = 1 OR ('d""": 'x',
                }).exists()
            )
        self.assertIn(
            """."field" -> 'test'' = ''"a"'') OR 1 = 1 OR (''d') = '"x"' """,
            queries[0]['sql'],
        )


@isolate_apps('postgres_tests')
class TestChecks(PostgreSQLSimpleTestCase):
    def test_invalid_default(self):
        class MyModel(PostgreSQLModel):
            field = JSONField(default={})

        model = MyModel()
        self.assertEqual(model.check(), [
            checks.Warning(
                msg=(
                    "JSONField default should be a callable instead of an "
                    "instance so that it's not shared between all field "
                    "instances."
                ),
                hint='Use a callable instead, e.g., use `dict` instead of `{}`.',
                obj=MyModel._meta.get_field('field'),
                id='postgres.E003',
            )
        ])

    def test_valid_default(self):
        class MyModel(PostgreSQLModel):
            field = JSONField(default=dict)

        model = MyModel()
        self.assertEqual(model.check(), [])

    def test_valid_default_none(self):
        class MyModel(PostgreSQLModel):
            field = JSONField(default=None)

        model = MyModel()
        self.assertEqual(model.check(), [])


class TestSerialization(PostgreSQLSimpleTestCase):
    test_data = (
        '[{"fields": {"field": %s, "field_custom": null}, '
        '"model": "postgres_tests.jsonmodel", "pk": null}]'
    )
    test_values = (
        # (Python value, serialized value),
        ({'a': 'b', 'c': None}, '{"a": "b", "c": null}'),
        ('abc', '"abc"'),
        ('{"a": "a"}', '"{\\"a\\": \\"a\\"}"'),
    )

    def test_dumping(self):
        for value, serialized in self.test_values:
            with self.subTest(value=value):
                instance = JSONModel(field=value)
                data = serializers.serialize('json', [instance])
                self.assertJSONEqual(data, self.test_data % serialized)

    def test_loading(self):
        for value, serialized in self.test_values:
            with self.subTest(value=value):
                instance = list(serializers.deserialize('json', self.test_data % serialized))[0].object
                self.assertEqual(instance.field, value)


class TestValidation(PostgreSQLSimpleTestCase):
    def test_not_serializable(self):
        field = JSONField()
        with self.assertRaises(exceptions.ValidationError) as cm:
            field.clean(datetime.timedelta(days=1), None)
        self.assertEqual(cm.exception.code, 'invalid')
        self.assertEqual(cm.exception.message % cm.exception.params, "Value must be valid JSON.")

    def test_custom_encoder(self):
        with self.assertRaisesMessage(ValueError, "The encoder parameter must be a callable object."):
            field = JSONField(encoder=DjangoJSONEncoder())
        field = JSONField(encoder=DjangoJSONEncoder)
        self.assertEqual(field.clean(datetime.timedelta(days=1), None), datetime.timedelta(days=1))


class TestFormField(PostgreSQLSimpleTestCase):
    def test_valid(self):
        field = forms.JSONField()
        value = field.clean('{"a": "b"}')
        self.assertEqual(value, {'a': 'b'})

    def test_valid_empty(self):
        field = forms.JSONField(required=False)
        value = field.clean('')
        self.assertIsNone(value)

    def test_invalid(self):
        field = forms.JSONField()
        with self.assertRaises(exceptions.ValidationError) as cm:
            field.clean('{some badly formed: json}')
        self.assertEqual(cm.exception.messages[0], '“{some badly formed: json}” value must be valid JSON.')

    def test_formfield(self):
        model_field = JSONField()
        form_field = model_field.formfield()
        self.assertIsInstance(form_field, forms.JSONField)

    def test_formfield_disabled(self):
        class JsonForm(Form):
            name = CharField()
            jfield = forms.JSONField(disabled=True)

        form = JsonForm({'name': 'xyz', 'jfield': '["bar"]'}, initial={'jfield': ['foo']})
        self.assertIn('[&quot;foo&quot;]</textarea>', form.as_p())

    def test_prepare_value(self):
        field = forms.JSONField()
        self.assertEqual(field.prepare_value({'a': 'b'}), '{"a": "b"}')
        self.assertEqual(field.prepare_value(None), 'null')
        self.assertEqual(field.prepare_value('foo'), '"foo"')

    def test_redisplay_wrong_input(self):
        """
        When displaying a bound form (typically due to invalid input), the
        form should not overquote JSONField inputs.
        """
        class JsonForm(Form):
            name = CharField(max_length=2)
            jfield = forms.JSONField()

        # JSONField input is fine, name is too long
        form = JsonForm({'name': 'xyz', 'jfield': '["foo"]'})
        self.assertIn('[&quot;foo&quot;]</textarea>', form.as_p())
        # This time, the JSONField input is wrong
        form = JsonForm({'name': 'xy', 'jfield': '{"foo"}'})
        # Appears once in the textarea and once in the error message
        self.assertEqual(form.as_p().count(escape('{"foo"}')), 2)

    def test_widget(self):
        """The default widget of a JSONField is a Textarea."""
        field = forms.JSONField()
        self.assertIsInstance(field.widget, widgets.Textarea)

    def test_custom_widget_kwarg(self):
        """The widget can be overridden with a kwarg."""
        field = forms.JSONField(widget=widgets.Input)
        self.assertIsInstance(field.widget, widgets.Input)

    def test_custom_widget_attribute(self):
        """The widget can be overridden with an attribute."""
        class CustomJSONField(forms.JSONField):
            widget = widgets.Input

        field = CustomJSONField()
        self.assertIsInstance(field.widget, widgets.Input)

    def test_already_converted_value(self):
        field = forms.JSONField(required=False)
        tests = [
            '["a", "b", "c"]',
            '{"a": 1, "b": 2}',
            '1',
            '1.5',
            '"foo"',
            'true',
            'false',
            'null',
        ]
        for json_string in tests:
            val = field.clean(json_string)
            self.assertEqual(field.clean(val), val)

    def test_has_changed(self):
        field = forms.JSONField()
        self.assertIs(field.has_changed({'a': True}, '{"a": 1}'), True)
        self.assertIs(field.has_changed({'a': 1, 'b': 2}, '{"b": 2, "a": 1}'), False)
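

def _demo_jsonfield_lookups():  # pragma: no cover
    """
    Illustrative sketch, not exercised by the suite: the lookup spellings
    tested above, written out as plain queryset calls against the JSONModel
    test model. The comments note the jsonb operation each lookup maps to on
    PostgreSQL; none of these calls execute a query (querysets are lazy).
    """
    JSONModel.objects.filter(field__a='b')                 # top-level key comparison
    JSONModel.objects.filter(field__k__l='m')              # nested key path
    JSONModel.objects.filter(field__d__1__f='g')           # mixed array/object path
    JSONModel.objects.filter(field__has_key='a')           # jsonb ? operator
    JSONModel.objects.filter(field__contains={'a': 'b'})   # jsonb @> operator
    JSONModel.objects.filter(field__a__isnull=True)        # key absence, complement of has_key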
import json

from django.core import checks, exceptions, serializers
from django.db import connection
from django.db.models.expressions import RawSQL
from django.forms import Form
from django.test.utils import CaptureQueriesContext, isolate_apps

from . import PostgreSQLSimpleTestCase, PostgreSQLTestCase
from .models import HStoreModel, PostgreSQLModel

try:
    from django.contrib.postgres import forms
    from django.contrib.postgres.fields import HStoreField
    from django.contrib.postgres.fields.hstore import KeyTransform
    from django.contrib.postgres.validators import KeysValidator
except ImportError:
    pass


class SimpleTests(PostgreSQLTestCase):
    def test_save_load_success(self):
        value = {'a': 'b'}
        instance = HStoreModel(field=value)
        instance.save()
        reloaded = HStoreModel.objects.get()
        self.assertEqual(reloaded.field, value)

    def test_null(self):
        instance = HStoreModel(field=None)
        instance.save()
        reloaded = HStoreModel.objects.get()
        self.assertIsNone(reloaded.field)

    def test_value_null(self):
        value = {'a': None}
        instance = HStoreModel(field=value)
        instance.save()
        reloaded = HStoreModel.objects.get()
        self.assertEqual(reloaded.field, value)

    def test_key_val_cast_to_string(self):
        value = {'a': 1, 'b': 'B', 2: 'c', 'ï': 'ê'}
        expected_value = {'a': '1', 'b': 'B', '2': 'c', 'ï': 'ê'}

        instance = HStoreModel.objects.create(field=value)
        instance = HStoreModel.objects.get()
        self.assertEqual(instance.field, expected_value)

        instance = HStoreModel.objects.get(field__a=1)
        self.assertEqual(instance.field, expected_value)

        instance = HStoreModel.objects.get(field__has_keys=[2, 'a', 'ï'])
        self.assertEqual(instance.field, expected_value)

    def test_array_field(self):
        value = [
            {'a': 1, 'b': 'B', 2: 'c', 'ï': 'ê'},
            {'a': 1, 'b': 'B', 2: 'c', 'ï': 'ê'},
        ]
        expected_value = [
            {'a': '1', 'b': 'B', '2': 'c', 'ï': 'ê'},
            {'a': '1', 'b': 'B', '2': 'c', 'ï': 'ê'},
        ]
        instance = HStoreModel.objects.create(array_field=value)
        instance.refresh_from_db()
        self.assertEqual(instance.array_field, expected_value)


class TestQuerying(PostgreSQLTestCase):
    @classmethod
    def setUpTestData(cls):
        cls.objs = HStoreModel.objects.bulk_create([
            HStoreModel(field={'a': 'b'}),
            HStoreModel(field={'a': 'b', 'c': 'd'}),
            HStoreModel(field={'c': 'd'}),
            HStoreModel(field={}),
            HStoreModel(field=None),
        ])

    def test_exact(self):
        self.assertSequenceEqual(
            HStoreModel.objects.filter(field__exact={'a': 'b'}),
            self.objs[:1]
        )

    def test_contained_by(self):
        self.assertSequenceEqual(
            HStoreModel.objects.filter(field__contained_by={'a': 'b', 'c': 'd'}),
            self.objs[:4]
        )

    def test_contains(self):
        self.assertSequenceEqual(
            HStoreModel.objects.filter(field__contains={'a': 'b'}),
            self.objs[:2]
        )

    def test_in_generator(self):
        def search():
            yield {'a': 'b'}
        self.assertSequenceEqual(
            HStoreModel.objects.filter(field__in=search()),
            self.objs[:1]
        )

    def test_has_key(self):
        self.assertSequenceEqual(
            HStoreModel.objects.filter(field__has_key='c'),
            self.objs[1:3]
        )

    def test_has_keys(self):
        self.assertSequenceEqual(
            HStoreModel.objects.filter(field__has_keys=['a', 'c']),
            self.objs[1:2]
        )

    def test_has_any_keys(self):
        self.assertSequenceEqual(
            HStoreModel.objects.filter(field__has_any_keys=['a', 'c']),
            self.objs[:3]
        )

    def test_key_transform(self):
        self.assertSequenceEqual(
            HStoreModel.objects.filter(field__a='b'),
            self.objs[:2]
        )

    def test_key_transform_raw_expression(self):
        expr = RawSQL('%s::hstore', ['x => b, y => c'])
        self.assertSequenceEqual(
            HStoreModel.objects.filter(field__a=KeyTransform('x', expr)),
            self.objs[:2]
        )

    def test_keys(self):
        self.assertSequenceEqual(
            HStoreModel.objects.filter(field__keys=['a']),
            self.objs[:1]
        )

    def test_values(self):
        self.assertSequenceEqual(
            HStoreModel.objects.filter(field__values=['b']),
            self.objs[:1]
        )

    def test_field_chaining(self):
        self.assertSequenceEqual(
            HStoreModel.objects.filter(field__a__contains='b'),
            self.objs[:2]
        )

    def test_order_by_field(self):
        more_objs = (
            HStoreModel.objects.create(field={'g': '637'}),
            HStoreModel.objects.create(field={'g': '002'}),
            HStoreModel.objects.create(field={'g': '042'}),
            HStoreModel.objects.create(field={'g': '981'}),
        )
        self.assertSequenceEqual(
            HStoreModel.objects.filter(field__has_key='g').order_by('field__g'),
            [more_objs[1], more_objs[2], more_objs[0], more_objs[3]]
        )

    def test_keys_contains(self):
        self.assertSequenceEqual(
            HStoreModel.objects.filter(field__keys__contains=['a']),
            self.objs[:2]
        )

    def test_values_overlap(self):
        self.assertSequenceEqual(
            HStoreModel.objects.filter(field__values__overlap=['b', 'd']),
            self.objs[:3]
        )

    def test_key_isnull(self):
        obj = HStoreModel.objects.create(field={'a': None})
        self.assertSequenceEqual(
            HStoreModel.objects.filter(field__a__isnull=True),
            self.objs[2:5] + [obj]
        )
        self.assertSequenceEqual(
            HStoreModel.objects.filter(field__a__isnull=False),
            self.objs[:2]
        )

    def test_usage_in_subquery(self):
        self.assertSequenceEqual(
            HStoreModel.objects.filter(id__in=HStoreModel.objects.filter(field__a='b')),
            self.objs[:2]
        )

    def test_key_sql_injection(self):
        with CaptureQueriesContext(connection) as queries:
            self.assertFalse(
                HStoreModel.objects.filter(**{
                    "field__test' = 'a') OR 1 = 1 OR ('d": 'x',
                }).exists()
            )
        self.assertIn(
            """."field" -> 'test'' = ''a'') OR 1 = 1 OR (''d') = 'x' """,
            queries[0]['sql'],
        )


@isolate_apps('postgres_tests')
class TestChecks(PostgreSQLSimpleTestCase):
    def test_invalid_default(self):
        class MyModel(PostgreSQLModel):
            field = HStoreField(default={})

        model = MyModel()
        self.assertEqual(model.check(), [
            checks.Warning(
                msg=(
                    "HStoreField default should be a callable instead of an "
                    "instance so that it's not shared between all field "
                    "instances."
                ),
                hint='Use a callable instead, e.g., use `dict` instead of `{}`.',
                obj=MyModel._meta.get_field('field'),
                id='postgres.E003',
            )
        ])

    def test_valid_default(self):
        class MyModel(PostgreSQLModel):
            field = HStoreField(default=dict)

        self.assertEqual(MyModel().check(), [])


class TestSerialization(PostgreSQLSimpleTestCase):
    test_data = json.dumps([{
        'model': 'postgres_tests.hstoremodel',
        'pk': None,
        'fields': {
            'field': json.dumps({'a': 'b'}),
            'array_field': json.dumps([
                json.dumps({'a': 'b'}),
                json.dumps({'b': 'a'}),
            ]),
        },
    }])

    def test_dumping(self):
        instance = HStoreModel(field={'a': 'b'}, array_field=[{'a': 'b'}, {'b': 'a'}])
        data = serializers.serialize('json', [instance])
        self.assertEqual(json.loads(data), json.loads(self.test_data))

    def test_loading(self):
        instance = list(serializers.deserialize('json', self.test_data))[0].object
        self.assertEqual(instance.field, {'a': 'b'})
        self.assertEqual(instance.array_field, [{'a': 'b'}, {'b': 'a'}])

    def test_roundtrip_with_null(self):
        instance = HStoreModel(field={'a': 'b', 'c': None})
        data = serializers.serialize('json', [instance])
        new_instance = list(serializers.deserialize('json', data))[0].object
        self.assertEqual(instance.field, new_instance.field)


class TestValidation(PostgreSQLSimpleTestCase):
    def test_not_a_string(self):
        field = HStoreField()
        with self.assertRaises(exceptions.ValidationError) as cm:
            field.clean({'a': 1}, None)
        self.assertEqual(cm.exception.code, 'not_a_string')
        self.assertEqual(cm.exception.message % cm.exception.params, 'The value of “a” is not a string or null.')

    def test_none_allowed_as_value(self):
        field = HStoreField()
        self.assertEqual(field.clean({'a': None}, None), {'a': None})


class TestFormField(PostgreSQLSimpleTestCase):
    def test_valid(self):
        field = forms.HStoreField()
        value = field.clean('{"a": "b"}')
        self.assertEqual(value, {'a': 'b'})

    def test_invalid_json(self):
        field = forms.HStoreField()
        with self.assertRaises(exceptions.ValidationError) as cm:
            field.clean('{"a": "b"')
        self.assertEqual(cm.exception.messages[0], 'Could not load JSON data.')
        self.assertEqual(cm.exception.code, 'invalid_json')

    def test_non_dict_json(self):
        field = forms.HStoreField()
        msg = 'Input must be a JSON dictionary.'
        with self.assertRaisesMessage(exceptions.ValidationError, msg) as cm:
            field.clean('["a", "b", 1]')
        self.assertEqual(cm.exception.code, 'invalid_format')

    def test_not_string_values(self):
        field = forms.HStoreField()
        value = field.clean('{"a": 1}')
        self.assertEqual(value, {'a': '1'})

    def test_none_value(self):
        field = forms.HStoreField()
        value = field.clean('{"a": null}')
        self.assertEqual(value, {'a': None})

    def test_empty(self):
        field = forms.HStoreField(required=False)
        value = field.clean('')
        self.assertEqual(value, {})

    def test_model_field_formfield(self):
        model_field = HStoreField()
        form_field = model_field.formfield()
        self.assertIsInstance(form_field, forms.HStoreField)

    def test_field_has_changed(self):
        class HStoreFormTest(Form):
            f1 = forms.HStoreField()
        form_w_hstore = HStoreFormTest()
        self.assertFalse(form_w_hstore.has_changed())

        form_w_hstore = HStoreFormTest({'f1': '{"a": 1}'})
        self.assertTrue(form_w_hstore.has_changed())

        form_w_hstore = HStoreFormTest({'f1': '{"a": 1}'}, initial={'f1': '{"a": 1}'})
        self.assertFalse(form_w_hstore.has_changed())

        form_w_hstore = HStoreFormTest({'f1': '{"a": 2}'}, initial={'f1': '{"a": 1}'})
        self.assertTrue(form_w_hstore.has_changed())

        form_w_hstore = HStoreFormTest({'f1': '{"a": 1}'}, initial={'f1': {"a": 1}})
        self.assertFalse(form_w_hstore.has_changed())

        form_w_hstore = HStoreFormTest({'f1': '{"a": 2}'}, initial={'f1': {"a": 1}})
        self.assertTrue(form_w_hstore.has_changed())


class TestValidator(PostgreSQLSimpleTestCase):
    def test_simple_valid(self):
        validator = KeysValidator(keys=['a', 'b'])
        validator({'a': 'foo', 'b': 'bar', 'c': 'baz'})

    def test_missing_keys(self):
        validator = KeysValidator(keys=['a', 'b'])
        with self.assertRaises(exceptions.ValidationError) as cm:
            validator({'a': 'foo', 'c': 'baz'})
        self.assertEqual(cm.exception.messages[0], 'Some keys were missing: b')
        self.assertEqual(cm.exception.code, 'missing_keys')

    def test_strict_valid(self):
        validator = KeysValidator(keys=['a', 'b'], strict=True)
        validator({'a': 'foo', 'b': 'bar'})

    def test_extra_keys(self):
        validator = KeysValidator(keys=['a', 'b'], strict=True)
        with self.assertRaises(exceptions.ValidationError) as cm:
            validator({'a': 'foo', 'b': 'bar', 'c': 'baz'})
        self.assertEqual(cm.exception.messages[0], 'Some unknown keys were provided: c')
        self.assertEqual(cm.exception.code, 'extra_keys')

    def test_custom_messages(self):
        messages = {
            'missing_keys': 'Foobar',
        }
        validator = KeysValidator(keys=['a', 'b'], strict=True, messages=messages)
        with self.assertRaises(exceptions.ValidationError) as cm:
            validator({'a': 'foo', 'c': 'baz'})
        self.assertEqual(cm.exception.messages[0], 'Foobar')
        self.assertEqual(cm.exception.code, 'missing_keys')
        with self.assertRaises(exceptions.ValidationError) as cm:
            validator({'a': 'foo', 'b': 'bar', 'c': 'baz'})
        self.assertEqual(cm.exception.messages[0], 'Some unknown keys were provided: c')
        self.assertEqual(cm.exception.code, 'extra_keys')

    def test_deconstruct(self):
        messages = {
            'missing_keys': 'Foobar',
        }
        validator = KeysValidator(keys=['a', 'b'], strict=True, messages=messages)
        path, args, kwargs = validator.deconstruct()
        self.assertEqual(path, 'django.contrib.postgres.validators.KeysValidator')
        self.assertEqual(args, ())
        self.assertEqual(kwargs, {'keys': ['a', 'b'], 'strict': True, 'messages': messages})
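

def _demo_hstore_lookups():  # pragma: no cover
    """
    Illustrative sketch, not exercised by the suite: the hstore lookups
    tested above as plain queryset calls against the HStoreModel test model.
    Keys and values are always stored as strings (or None); the comments note
    the hstore operation each lookup maps to.
    """
    HStoreModel.objects.filter(field__a='b')                  # field -> 'a' comparison
    HStoreModel.objects.filter(field__has_key='c')            # field ? 'c'
    HStoreModel.objects.filter(field__contains={'a': 'b'})    # field @> hstore literal
    HStoreModel.objects.filter(field__keys__contains=['a'])   # akeys(field) containment
    HStoreModel.objects.filter(field__values__overlap=['b'])  # avals(field) overlap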
import datetime

from django.db import connection, transaction
from django.db.models import F, Func, Q
from django.db.models.constraints import CheckConstraint
from django.db.utils import IntegrityError
from django.utils import timezone

from . import PostgreSQLTestCase
from .models import HotelReservation, RangesModel, Room

try:
    from django.contrib.postgres.constraints import ExclusionConstraint
    from django.contrib.postgres.fields import DateTimeRangeField, RangeBoundary, RangeOperators
    from psycopg2.extras import DateRange, NumericRange
except ImportError:
    pass


class SchemaTests(PostgreSQLTestCase):
    def get_constraints(self, table):
        """Get the constraints on the table using a new cursor."""
        with connection.cursor() as cursor:
            return connection.introspection.get_constraints(cursor, table)

    def test_check_constraint_range_value(self):
        constraint_name = 'ints_between'
        self.assertNotIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
        constraint = CheckConstraint(
            check=Q(ints__contained_by=NumericRange(10, 30)),
            name=constraint_name,
        )
        with connection.schema_editor() as editor:
            editor.add_constraint(RangesModel, constraint)
        self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
        with self.assertRaises(IntegrityError), transaction.atomic():
            RangesModel.objects.create(ints=(20, 50))
        RangesModel.objects.create(ints=(10, 30))

    def test_check_constraint_daterange_contains(self):
        constraint_name = 'dates_contains'
        self.assertNotIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
        constraint = CheckConstraint(
            check=Q(dates__contains=F('dates_inner')),
            name=constraint_name,
        )
        with connection.schema_editor() as editor:
            editor.add_constraint(RangesModel, constraint)
        self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
        date_1 = datetime.date(2016, 1, 1)
        date_2 = datetime.date(2016, 1, 4)
        with self.assertRaises(IntegrityError), transaction.atomic():
            RangesModel.objects.create(
                dates=(date_1, date_2),
                dates_inner=(date_1, date_2.replace(day=5)),
            )
        RangesModel.objects.create(
            dates=(date_1, date_2),
            dates_inner=(date_1, date_2),
        )

    def test_check_constraint_datetimerange_contains(self):
        constraint_name = 'timestamps_contains'
        self.assertNotIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
        constraint = CheckConstraint(
            check=Q(timestamps__contains=F('timestamps_inner')),
            name=constraint_name,
        )
        with connection.schema_editor() as editor:
            editor.add_constraint(RangesModel, constraint)
        self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
        datetime_1 = datetime.datetime(2016, 1, 1)
        datetime_2 = datetime.datetime(2016, 1, 2, 12)
        with self.assertRaises(IntegrityError), transaction.atomic():
            RangesModel.objects.create(
                timestamps=(datetime_1, datetime_2),
                timestamps_inner=(datetime_1, datetime_2.replace(hour=13)),
            )
        RangesModel.objects.create(
            timestamps=(datetime_1, datetime_2),
            timestamps_inner=(datetime_1, datetime_2),
        )


class ExclusionConstraintTests(PostgreSQLTestCase):
    def get_constraints(self, table):
        """Get the constraints on the table using a new cursor."""
        with connection.cursor() as cursor:
            return connection.introspection.get_constraints(cursor, table)

    def test_invalid_condition(self):
        msg = 'ExclusionConstraint.condition must be a Q instance.'
        with self.assertRaisesMessage(ValueError, msg):
            ExclusionConstraint(
                index_type='GIST',
                name='exclude_invalid_condition',
                expressions=[(F('datespan'), RangeOperators.OVERLAPS)],
                condition=F('invalid'),
            )

    def test_invalid_index_type(self):
        msg = 'Exclusion constraints only support GiST or SP-GiST indexes.'
        with self.assertRaisesMessage(ValueError, msg):
            ExclusionConstraint(
                index_type='gin',
                name='exclude_invalid_index_type',
                expressions=[(F('datespan'), RangeOperators.OVERLAPS)],
            )

    def test_invalid_expressions(self):
        msg = 'The expressions must be a list of 2-tuples.'
        for expressions in (['foo'], [('foo')], [('foo_1', 'foo_2', 'foo_3')]):
            with self.subTest(expressions), self.assertRaisesMessage(ValueError, msg):
                ExclusionConstraint(
                    index_type='GIST',
                    name='exclude_invalid_expressions',
                    expressions=expressions,
                )

    def test_empty_expressions(self):
        msg = 'At least one expression is required to define an exclusion constraint.'
        for empty_expressions in (None, []):
            with self.subTest(empty_expressions), self.assertRaisesMessage(ValueError, msg):
                ExclusionConstraint(
                    index_type='GIST',
                    name='exclude_empty_expressions',
                    expressions=empty_expressions,
                )

    def test_repr(self):
        constraint = ExclusionConstraint(
            name='exclude_overlapping',
            expressions=[
                (F('datespan'), RangeOperators.OVERLAPS),
                (F('room'), RangeOperators.EQUAL),
            ],
        )
        self.assertEqual(
            repr(constraint),
            "<ExclusionConstraint: index_type=GIST, expressions=["
            "(F(datespan), '&&'), (F(room), '=')]>",
        )
        constraint = ExclusionConstraint(
            name='exclude_overlapping',
            expressions=[(F('datespan'), RangeOperators.ADJACENT_TO)],
            condition=Q(cancelled=False),
            index_type='SPGiST',
        )
        self.assertEqual(
            repr(constraint),
            "<ExclusionConstraint: index_type=SPGiST, expressions=["
            "(F(datespan), '-|-')], condition=(AND: ('cancelled', False))>",
        )

    def test_eq(self):
        constraint_1 = ExclusionConstraint(
            name='exclude_overlapping',
            expressions=[
                (F('datespan'), RangeOperators.OVERLAPS),
                (F('room'), RangeOperators.EQUAL),
            ],
            condition=Q(cancelled=False),
        )
        constraint_2 = ExclusionConstraint(
            name='exclude_overlapping',
            expressions=[
                ('datespan', RangeOperators.OVERLAPS),
                ('room', RangeOperators.EQUAL),
            ],
        )
        constraint_3 = ExclusionConstraint(
            name='exclude_overlapping',
            expressions=[('datespan', RangeOperators.OVERLAPS)],
            condition=Q(cancelled=False),
        )
        self.assertEqual(constraint_1, constraint_1)
        self.assertNotEqual(constraint_1, constraint_2)
        self.assertNotEqual(constraint_1, constraint_3)
        self.assertNotEqual(constraint_2, constraint_3)
        self.assertNotEqual(constraint_1, object())

    def test_deconstruct(self):
        constraint = ExclusionConstraint(
            name='exclude_overlapping',
            expressions=[('datespan', RangeOperators.OVERLAPS), ('room', RangeOperators.EQUAL)],
        )
        path, args, kwargs = constraint.deconstruct()
        self.assertEqual(path, 'django.contrib.postgres.constraints.ExclusionConstraint')
        self.assertEqual(args, ())
        self.assertEqual(kwargs, {
            'name': 'exclude_overlapping',
            'expressions': [('datespan', RangeOperators.OVERLAPS), ('room', RangeOperators.EQUAL)],
        })

    def test_deconstruct_index_type(self):
        constraint = ExclusionConstraint(
            name='exclude_overlapping',
            index_type='SPGIST',
            expressions=[('datespan', RangeOperators.OVERLAPS), ('room', RangeOperators.EQUAL)],
        )
        path, args, kwargs = constraint.deconstruct()
        self.assertEqual(path, 'django.contrib.postgres.constraints.ExclusionConstraint')
        self.assertEqual(args, ())
        self.assertEqual(kwargs, {
            'name': 'exclude_overlapping',
            'index_type': 'SPGIST',
            'expressions': [('datespan', RangeOperators.OVERLAPS), ('room', RangeOperators.EQUAL)],
        })

    def test_deconstruct_condition(self):
        constraint = ExclusionConstraint(
            name='exclude_overlapping',
            expressions=[('datespan', RangeOperators.OVERLAPS), ('room', RangeOperators.EQUAL)],
            condition=Q(cancelled=False),
        )
        path, args, kwargs = constraint.deconstruct()
        self.assertEqual(path, 'django.contrib.postgres.constraints.ExclusionConstraint')
        self.assertEqual(args, ())
        self.assertEqual(kwargs, {
            'name': 'exclude_overlapping',
            'expressions': [('datespan', RangeOperators.OVERLAPS), ('room', RangeOperators.EQUAL)],
            'condition': Q(cancelled=False),
        })

    def _test_range_overlaps(self, constraint):
        # Create exclusion constraint.
        self.assertNotIn(constraint.name, self.get_constraints(HotelReservation._meta.db_table))
        with connection.schema_editor() as editor:
            editor.add_constraint(HotelReservation, constraint)
        self.assertIn(constraint.name, self.get_constraints(HotelReservation._meta.db_table))
        # Add initial reservations.
        room101 = Room.objects.create(number=101)
        room102 = Room.objects.create(number=102)
        datetimes = [
            timezone.datetime(2018, 6, 20),
            timezone.datetime(2018, 6, 24),
            timezone.datetime(2018, 6, 26),
            timezone.datetime(2018, 6, 28),
            timezone.datetime(2018, 6, 29),
        ]
        HotelReservation.objects.create(
            datespan=DateRange(datetimes[0].date(), datetimes[1].date()),
            start=datetimes[0],
            end=datetimes[1],
            room=room102,
        )
        HotelReservation.objects.create(
            datespan=DateRange(datetimes[1].date(), datetimes[3].date()),
            start=datetimes[1],
            end=datetimes[3],
            room=room102,
        )
        # Overlap dates.
        with self.assertRaises(IntegrityError), transaction.atomic():
            reservation = HotelReservation(
                datespan=(datetimes[1].date(), datetimes[2].date()),
                start=datetimes[1],
                end=datetimes[2],
                room=room102,
            )
            reservation.save()
        # Valid range.
        HotelReservation.objects.bulk_create([
            # Other room.
            HotelReservation(
                datespan=(datetimes[1].date(), datetimes[2].date()),
                start=datetimes[1],
                end=datetimes[2],
                room=room101,
            ),
            # Cancelled reservation.
            HotelReservation(
                datespan=(datetimes[1].date(), datetimes[1].date()),
                start=datetimes[1],
                end=datetimes[2],
                room=room102,
                cancelled=True,
            ),
            # Other adjacent dates.
            HotelReservation(
                datespan=(datetimes[3].date(), datetimes[4].date()),
                start=datetimes[3],
                end=datetimes[4],
                room=room102,
            ),
        ])

    def test_range_overlaps_custom(self):
        class TsTzRange(Func):
            function = 'TSTZRANGE'
            output_field = DateTimeRangeField()

        constraint = ExclusionConstraint(
            name='exclude_overlapping_reservations_custom',
            expressions=[
                (TsTzRange('start', 'end', RangeBoundary()), RangeOperators.OVERLAPS),
                ('room', RangeOperators.EQUAL)
            ],
            condition=Q(cancelled=False),
        )
        self._test_range_overlaps(constraint)

    def test_range_overlaps(self):
        constraint = ExclusionConstraint(
            name='exclude_overlapping_reservations',
            expressions=[
                (F('datespan'), RangeOperators.OVERLAPS),
                ('room', RangeOperators.EQUAL)
            ],
            condition=Q(cancelled=False),
        )
        self._test_range_overlaps(constraint)

    def test_range_adjacent(self):
        constraint_name = 'ints_adjacent'
        self.assertNotIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
        constraint = ExclusionConstraint(
            name=constraint_name,
            expressions=[('ints', RangeOperators.ADJACENT_TO)],
        )
        with connection.schema_editor() as editor:
            editor.add_constraint(RangesModel, constraint)
        self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
        RangesModel.objects.create(ints=(20, 50))
        with self.assertRaises(IntegrityError), transaction.atomic():
            RangesModel.objects.create(ints=(10, 20))
        RangesModel.objects.create(ints=(10, 19))
        RangesModel.objects.create(ints=(51, 60))
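

def _demo_exclusion_constraint_in_meta():  # pragma: no cover
    """
    Illustrative sketch, not exercised by the suite: the constraint shape
    exercised by _test_range_overlaps(), declared the usual way in a model's
    Meta rather than added through the schema editor. The Reservation model
    below is hypothetical; only the ExclusionConstraint arguments mirror the
    tests above.
    """
    from django.contrib.postgres.fields import DateRangeField
    from django.db import models

    class Reservation(models.Model):
        room = models.ForeignKey('Room', on_delete=models.CASCADE)
        datespan = DateRangeField()
        cancelled = models.BooleanField(default=False)

        class Meta:
            app_label = 'postgres_tests'
            constraints = [
                # No two active reservations for the same room may overlap.
                ExclusionConstraint(
                    name='demo_exclude_overlapping',
                    expressions=[
                        ('datespan', RangeOperators.OVERLAPS),
                        ('room', RangeOperators.EQUAL),
                    ],
                    condition=Q(cancelled=False),
                ),
            ]

    return Reservation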
import os
import shutil
import sys
import tempfile
import unittest
from io import StringIO
from unittest import mock

from django.conf import settings
from django.contrib.staticfiles import finders, storage
from django.contrib.staticfiles.management.commands.collectstatic import (
    Command as CollectstaticCommand,
)
from django.core.cache.backends.base import BaseCache
from django.core.management import call_command
from django.test import SimpleTestCase, ignore_warnings, override_settings
from django.utils.deprecation import RemovedInDjango31Warning

from .cases import CollectionTestCase
from .settings import TEST_ROOT


def hashed_file_path(test, path):
    fullpath = test.render_template(test.static_template_snippet(path))
    return fullpath.replace(settings.STATIC_URL, '')


class TestHashedFiles:
    hashed_file_path = hashed_file_path

    def setUp(self):
        self._max_post_process_passes = storage.staticfiles_storage.max_post_process_passes
        super().setUp()

    def tearDown(self):
        # Clear hashed files to avoid side effects among tests.
        storage.staticfiles_storage.hashed_files.clear()
        storage.staticfiles_storage.max_post_process_passes = self._max_post_process_passes

    def assertPostCondition(self):
        """
        Assert post conditions for a test are met. Must be manually called at
        the end of each test.
        """
        pass

    def test_template_tag_return(self):
        self.assertStaticRaises(ValueError, "does/not/exist.png", "/static/does/not/exist.png")
        self.assertStaticRenders("test/file.txt", "/static/test/file.dad0999e4f8f.txt")
        self.assertStaticRenders("test/file.txt", "/static/test/file.dad0999e4f8f.txt", asvar=True)
        self.assertStaticRenders("cached/styles.css", "/static/cached/styles.5e0040571e1a.css")
        self.assertStaticRenders("path/", "/static/path/")
        self.assertStaticRenders("path/?query", "/static/path/?query")
        self.assertPostCondition()

    def test_template_tag_simple_content(self):
        relpath = self.hashed_file_path("cached/styles.css")
        self.assertEqual(relpath, "cached/styles.5e0040571e1a.css")
        with storage.staticfiles_storage.open(relpath) as relfile:
            content = relfile.read()
            self.assertNotIn(b"cached/other.css", content)
            self.assertIn(b"other.d41d8cd98f00.css", content)
        self.assertPostCondition()

    def test_path_ignored_completely(self):
        relpath = self.hashed_file_path("cached/css/ignored.css")
        self.assertEqual(relpath, "cached/css/ignored.554da52152af.css")
        with storage.staticfiles_storage.open(relpath) as relfile:
            content = relfile.read()
            self.assertIn(b'#foobar', content)
            self.assertIn(b'http:foobar', content)
            self.assertIn(b'https:foobar', content)
            self.assertIn(b'data:foobar', content)
            self.assertIn(b'chrome:foobar', content)
            self.assertIn(b'//foobar', content)
        self.assertPostCondition()

    def test_path_with_querystring(self):
        relpath = self.hashed_file_path("cached/styles.css?spam=eggs")
        self.assertEqual(relpath, "cached/styles.5e0040571e1a.css?spam=eggs")
        with storage.staticfiles_storage.open("cached/styles.5e0040571e1a.css") as relfile:
            content = relfile.read()
            self.assertNotIn(b"cached/other.css", content)
            self.assertIn(b"other.d41d8cd98f00.css", content)
        self.assertPostCondition()

    def test_path_with_fragment(self):
        relpath = self.hashed_file_path("cached/styles.css#eggs")
        self.assertEqual(relpath, "cached/styles.5e0040571e1a.css#eggs")
        with storage.staticfiles_storage.open("cached/styles.5e0040571e1a.css") as relfile:
            content = relfile.read()
            self.assertNotIn(b"cached/other.css", content)
            self.assertIn(b"other.d41d8cd98f00.css", content)
        self.assertPostCondition()

    def test_path_with_querystring_and_fragment(self):
        relpath = self.hashed_file_path("cached/css/fragments.css")
        self.assertEqual(relpath, "cached/css/fragments.a60c0e74834f.css")
        with storage.staticfiles_storage.open(relpath) as relfile:
            content = relfile.read()
            self.assertIn(b'fonts/font.b9b105392eb8.eot?#iefix', content)
            self.assertIn(b'fonts/font.b8d603e42714.svg#webfontIyfZbseF', content)
            self.assertIn(b'fonts/font.b8d603e42714.svg#path/to/../../fonts/font.svg', content)
            self.assertIn(b'data:font/woff;charset=utf-8;base64,d09GRgABAAAAADJoAA0AAAAAR2QAAQAAAAAAAAAAAAA', content)
            self.assertIn(b'#default#VML', content)
        self.assertPostCondition()

    def test_template_tag_absolute(self):
        relpath = self.hashed_file_path("cached/absolute.css")
        self.assertEqual(relpath, "cached/absolute.eb04def9f9a4.css")
        with storage.staticfiles_storage.open(relpath) as relfile:
            content = relfile.read()
            self.assertNotIn(b"/static/cached/styles.css", content)
            self.assertIn(b"/static/cached/styles.5e0040571e1a.css", content)
            self.assertNotIn(b"/static/styles_root.css", content)
            self.assertIn(b"/static/styles_root.401f2509a628.css", content)
            self.assertIn(b'/static/cached/img/relative.acae32e4532b.png', content)
        self.assertPostCondition()

    def test_template_tag_absolute_root(self):
        """
        Like test_template_tag_absolute, but for a file in STATIC_ROOT (#26249).
        """
        relpath = self.hashed_file_path("absolute_root.css")
        self.assertEqual(relpath, "absolute_root.f821df1b64f7.css")
        with storage.staticfiles_storage.open(relpath) as relfile:
            content = relfile.read()
            self.assertNotIn(b"/static/styles_root.css", content)
            self.assertIn(b"/static/styles_root.401f2509a628.css", content)
        self.assertPostCondition()

    def test_template_tag_relative(self):
        relpath = self.hashed_file_path("cached/relative.css")
        self.assertEqual(relpath, "cached/relative.c3e9e1ea6f2e.css")
        with storage.staticfiles_storage.open(relpath) as relfile:
            content = relfile.read()
            self.assertNotIn(b"../cached/styles.css", content)
            self.assertNotIn(b'@import "styles.css"', content)
            self.assertNotIn(b'url(img/relative.png)', content)
            self.assertIn(b'url("img/relative.acae32e4532b.png")', content)
            self.assertIn(b"../cached/styles.5e0040571e1a.css", content)
        self.assertPostCondition()

    def test_import_replacement(self):
        "See #18050"
        relpath = self.hashed_file_path("cached/import.css")
        self.assertEqual(relpath, "cached/import.f53576679e5a.css")
        with storage.staticfiles_storage.open(relpath) as relfile:
            self.assertIn(b"""import url("styles.5e0040571e1a.css")""", relfile.read())
        self.assertPostCondition()

    def test_template_tag_deep_relative(self):
        relpath = self.hashed_file_path("cached/css/window.css")
        self.assertEqual(relpath, "cached/css/window.5d5c10836967.css")
        with storage.staticfiles_storage.open(relpath) as relfile:
            content = relfile.read()
            self.assertNotIn(b'url(img/window.png)', content)
            self.assertIn(b'url("img/window.acae32e4532b.png")', content)
        self.assertPostCondition()

    def test_template_tag_url(self):
        relpath = self.hashed_file_path("cached/url.css")
        self.assertEqual(relpath, "cached/url.902310b73412.css")
        with storage.staticfiles_storage.open(relpath) as relfile:
            self.assertIn(b"https://", relfile.read())
        self.assertPostCondition()

    @override_settings(
        STATICFILES_DIRS=[os.path.join(TEST_ROOT, 'project', 'loop')],
        STATICFILES_FINDERS=['django.contrib.staticfiles.finders.FileSystemFinder'],
    )
    def test_import_loop(self):
        finders.get_finder.cache_clear()
        err = StringIO()
        with self.assertRaisesMessage(RuntimeError, 'Max post-process passes exceeded'):
            call_command('collectstatic', interactive=False, verbosity=0, stderr=err)
        self.assertEqual("Post-processing 'All' failed!\n\n", err.getvalue())
        self.assertPostCondition()

    def test_post_processing(self):
        """
        post_processing behaves correctly.

        Files that are alterable should always be post-processed; files that
        aren't should be skipped.

        collectstatic has already been called once in setUp() for this
        testcase, therefore we check by verifying behavior on a second run.
        """
        collectstatic_args = {
            'interactive': False,
            'verbosity': 0,
            'link': False,
            'clear': False,
            'dry_run': False,
            'post_process': True,
            'use_default_ignore_patterns': True,
            'ignore_patterns': ['*.ignoreme'],
        }

        collectstatic_cmd = CollectstaticCommand()
        collectstatic_cmd.set_options(**collectstatic_args)
        stats = collectstatic_cmd.collect()
        self.assertIn(os.path.join('cached', 'css', 'window.css'), stats['post_processed'])
        self.assertIn(os.path.join('cached', 'css', 'img', 'window.png'), stats['unmodified'])
        self.assertIn(os.path.join('test', 'nonascii.css'), stats['post_processed'])
        self.assertPostCondition()

    def test_css_import_case_insensitive(self):
        relpath = self.hashed_file_path("cached/styles_insensitive.css")
        self.assertEqual(relpath, "cached/styles_insensitive.3fa427592a53.css")
        with storage.staticfiles_storage.open(relpath) as relfile:
            content = relfile.read()
            self.assertNotIn(b"cached/other.css", content)
            self.assertIn(b"other.d41d8cd98f00.css", content)
        self.assertPostCondition()

    @override_settings(
        STATICFILES_DIRS=[os.path.join(TEST_ROOT, 'project', 'faulty')],
        STATICFILES_FINDERS=['django.contrib.staticfiles.finders.FileSystemFinder'],
    )
    def test_post_processing_failure(self):
        """
        post_processing indicates the origin of the error when it fails.
        """
        finders.get_finder.cache_clear()
        err = StringIO()
        with self.assertRaises(Exception):
            call_command('collectstatic', interactive=False, verbosity=0, stderr=err)
        self.assertEqual("Post-processing 'faulty.css' failed!\n\n", err.getvalue())
        self.assertPostCondition()


@ignore_warnings(category=RemovedInDjango31Warning)
@override_settings(
    STATICFILES_STORAGE='django.contrib.staticfiles.storage.CachedStaticFilesStorage',
)
class TestCollectionCachedStorage(TestHashedFiles, CollectionTestCase):
    """
    Tests for the Cache busting storage
    """
    def test_cache_invalidation(self):
        name = "cached/styles.css"
        hashed_name = "cached/styles.5e0040571e1a.css"
        # check if the cache is filled correctly as expected
        cache_key = storage.staticfiles_storage.hash_key(name)
        cached_name = storage.staticfiles_storage.hashed_files.get(cache_key)
        self.assertEqual(self.hashed_file_path(name), cached_name)
        # clearing the cache to make sure we re-set it correctly in the url method
        storage.staticfiles_storage.hashed_files.clear()
        cached_name = storage.staticfiles_storage.hashed_files.get(cache_key)
        self.assertIsNone(cached_name)
        self.assertEqual(self.hashed_file_path(name), hashed_name)
        cached_name = storage.staticfiles_storage.hashed_files.get(cache_key)
        self.assertEqual(cached_name, hashed_name)
        # Check files that had to be hashed multiple times since their content
        # includes other files that were hashed.
        name = 'cached/relative.css'
        hashed_name = 'cached/relative.c3e9e1ea6f2e.css'
        cache_key = storage.staticfiles_storage.hash_key(name)
        cached_name = storage.staticfiles_storage.hashed_files.get(cache_key)
        self.assertIsNone(cached_name)
        self.assertEqual(self.hashed_file_path(name), hashed_name)
        cached_name = storage.staticfiles_storage.hashed_files.get(cache_key)
        self.assertEqual(cached_name, hashed_name)

    def test_cache_key_memcache_validation(self):
        """
        Handle cache key creation correctly, see #17861.
        """
        name = (
            "/some crazy/long filename/ with spaces Here and ?#%#$/other/stuff"
            "/some crazy/long filename/ with spaces Here and ?#%#$/other/stuff"
            "/some crazy/long filename/ with spaces Here and ?#%#$/other/stuff"
            "/some crazy/long filename/ with spaces Here and ?#%#$/other/stuff"
            "/some crazy/long filename/ with spaces Here and ?#%#$/other/stuff"
            "/some crazy/\x16\xb4"
        )
        cache_key = storage.staticfiles_storage.hash_key(name)
        cache_validator = BaseCache({})
        cache_validator.validate_key(cache_key)
        self.assertEqual(cache_key, 'staticfiles:821ea71ef36f95b3922a77f7364670e7')

    def test_corrupt_intermediate_files(self):
        configured_storage = storage.staticfiles_storage
        # Clear cache to force rehashing of the files
        configured_storage.hashed_files.clear()
        # Simulate a corrupt chain of intermediate files by ensuring they don't
        # resolve before the max post-process count, which would normally be
        # high enough.
        configured_storage.max_post_process_passes = 1
        # File without intermediates that can be rehashed without a problem.
        self.hashed_file_path('cached/css/img/window.png')
        # File with too many intermediates to rehash with the low max
        # post-process passes.
        err_msg = "The name 'cached/styles.css' could not be hashed with %r." % (configured_storage._wrapped,)
        with self.assertRaisesMessage(ValueError, err_msg):
            self.hashed_file_path('cached/styles.css')


class TestCachedStaticFilesStorageDeprecation(SimpleTestCase):
    def test_warning(self):
        from django.contrib.staticfiles.storage import CachedStaticFilesStorage
        from django.utils.deprecation import RemovedInDjango31Warning
        msg = (
            'CachedStaticFilesStorage is deprecated in favor of '
            'ManifestStaticFilesStorage.'
        )
        with self.assertRaisesMessage(RemovedInDjango31Warning, msg):
            CachedStaticFilesStorage()


@override_settings(STATICFILES_STORAGE='staticfiles_tests.storage.ExtraPatternsStorage')
class TestExtraPatternsStorage(CollectionTestCase):
    def setUp(self):
        storage.staticfiles_storage.hashed_files.clear()  # avoid cache interference
        super().setUp()

    def cached_file_path(self, path):
        fullpath = self.render_template(self.static_template_snippet(path))
        return fullpath.replace(settings.STATIC_URL, '')

    def test_multi_extension_patterns(self):
        """
        With storage classes having several file extension patterns, only the
        files matching a specific file pattern should be affected by the
        substitution (#19670).
        """
        # CSS files shouldn't be touched by JS patterns.
        relpath = self.cached_file_path("cached/import.css")
        self.assertEqual(relpath, "cached/import.f53576679e5a.css")
        with storage.staticfiles_storage.open(relpath) as relfile:
            self.assertIn(b'import url("styles.5e0040571e1a.css")', relfile.read())

        # Confirm JS patterns have been applied to JS files.
        relpath = self.cached_file_path("cached/test.js")
        self.assertEqual(relpath, "cached/test.388d7a790d46.js")
        with storage.staticfiles_storage.open(relpath) as relfile:
            self.assertIn(b'JS_URL("import.f53576679e5a.css")', relfile.read())


@override_settings(
    STATICFILES_STORAGE='django.contrib.staticfiles.storage.ManifestStaticFilesStorage',
)
class TestCollectionManifestStorage(TestHashedFiles, CollectionTestCase):
    """
    Tests for the Cache busting storage
    """
    def setUp(self):
        super().setUp()

        temp_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(temp_dir, 'test'))
        self._clear_filename = os.path.join(temp_dir, 'test', 'cleared.txt')
        with open(self._clear_filename, 'w') as f:
            f.write('to be deleted in one test')

        self.patched_settings = self.settings(
            STATICFILES_DIRS=settings.STATICFILES_DIRS + [temp_dir],
        )
        self.patched_settings.enable()
        self.addCleanup(shutil.rmtree, temp_dir)
        self._manifest_strict = storage.staticfiles_storage.manifest_strict

    def tearDown(self):
        self.patched_settings.disable()

        if os.path.exists(self._clear_filename):
            os.unlink(self._clear_filename)

        storage.staticfiles_storage.manifest_strict = self._manifest_strict
        super().tearDown()

    def assertPostCondition(self):
        hashed_files = storage.staticfiles_storage.hashed_files
        # The in-memory version of the manifest matches the one on disk
        # since a properly created manifest should cover all filenames.
        if hashed_files:
            manifest = storage.staticfiles_storage.load_manifest()
            self.assertEqual(hashed_files, manifest)

    def test_manifest_exists(self):
        filename = storage.staticfiles_storage.manifest_name
        path = storage.staticfiles_storage.path(filename)
        self.assertTrue(os.path.exists(path))

    def test_manifest_does_not_exist(self):
        storage.staticfiles_storage.manifest_name = 'does.not.exist.json'
        self.assertIsNone(storage.staticfiles_storage.read_manifest())

    def test_manifest_does_not_ignore_permission_error(self):
        with mock.patch('builtins.open', side_effect=PermissionError):
            with self.assertRaises(PermissionError):
                storage.staticfiles_storage.read_manifest()

    def test_loaded_cache(self):
        self.assertNotEqual(storage.staticfiles_storage.hashed_files, {})
        manifest_content = storage.staticfiles_storage.read_manifest()
        self.assertIn(
            '"version": "%s"' % storage.staticfiles_storage.manifest_version,
            manifest_content
        )

    def test_parse_cache(self):
        hashed_files = storage.staticfiles_storage.hashed_files
        manifest = storage.staticfiles_storage.load_manifest()
        self.assertEqual(hashed_files, manifest)

    def test_clear_empties_manifest(self):
        cleared_file_name = storage.staticfiles_storage.clean_name(os.path.join('test', 'cleared.txt'))
        # collect the additional file
        self.run_collectstatic()

        hashed_files = storage.staticfiles_storage.hashed_files
        self.assertIn(cleared_file_name, hashed_files)

        manifest_content = storage.staticfiles_storage.load_manifest()
        self.assertIn(cleared_file_name, manifest_content)

        original_path = storage.staticfiles_storage.path(cleared_file_name)
        self.assertTrue(os.path.exists(original_path))

        # delete the original file from the app, collect with clear
        os.unlink(self._clear_filename)
        self.run_collectstatic(clear=True)

        self.assertFileNotFound(original_path)

        hashed_files = storage.staticfiles_storage.hashed_files
        self.assertNotIn(cleared_file_name, hashed_files)

        manifest_content = storage.staticfiles_storage.load_manifest()
        self.assertNotIn(cleared_file_name, manifest_content)

    def test_missing_entry(self):
        missing_file_name = 'cached/missing.css'
        configured_storage = storage.staticfiles_storage
        self.assertNotIn(missing_file_name, configured_storage.hashed_files)

        # File name not found in manifest
        with self.assertRaisesMessage(ValueError, "Missing staticfiles manifest entry for '%s'" % missing_file_name):
            self.hashed_file_path(missing_file_name)

        configured_storage.manifest_strict = False
        # File doesn't exist on disk
        err_msg = "The file '%s' could not be found with %r." % (missing_file_name, configured_storage._wrapped)
        with self.assertRaisesMessage(ValueError, err_msg):
            self.hashed_file_path(missing_file_name)

        content = StringIO()
        content.write('Found')
        configured_storage.save(missing_file_name, content)
        # File exists on disk
        self.hashed_file_path(missing_file_name)

    def test_intermediate_files(self):
        cached_files = os.listdir(os.path.join(settings.STATIC_ROOT, 'cached'))
        # Intermediate files shouldn't be created for reference.
        self.assertEqual(
            len([
                cached_file
                for cached_file in cached_files
                if cached_file.startswith('relative.')
            ]),
            2,
        )


@override_settings(STATICFILES_STORAGE='staticfiles_tests.storage.SimpleStorage')
class TestCollectionSimpleStorage(CollectionTestCase):
    hashed_file_path = hashed_file_path

    def setUp(self):
        storage.staticfiles_storage.hashed_files.clear()  # avoid cache interference
        super().setUp()

    def test_template_tag_return(self):
        self.assertStaticRaises(ValueError, "does/not/exist.png", "/static/does/not/exist.png")
        self.assertStaticRenders("test/file.txt", "/static/test/file.deploy12345.txt")
        self.assertStaticRenders("cached/styles.css", "/static/cached/styles.deploy12345.css")
        self.assertStaticRenders("path/", "/static/path/")
        self.assertStaticRenders("path/?query", "/static/path/?query")

    def test_template_tag_simple_content(self):
        relpath = self.hashed_file_path("cached/styles.css")
        self.assertEqual(relpath, "cached/styles.deploy12345.css")
        with storage.staticfiles_storage.open(relpath) as relfile:
            content = relfile.read()
            self.assertNotIn(b"cached/other.css", content)
            self.assertIn(b"other.deploy12345.css", content)


class CustomStaticFilesStorage(storage.StaticFilesStorage):
    """
    Used in TestStaticFilePermissions
    """
    def __init__(self, *args, **kwargs):
        kwargs['file_permissions_mode'] = 0o640
        kwargs['directory_permissions_mode'] = 0o740
        super().__init__(*args, **kwargs)


@unittest.skipIf(sys.platform.startswith('win'), "Windows only partially supports chmod.")
class TestStaticFilePermissions(CollectionTestCase):

    command_params = {
        'interactive': False,
        'verbosity': 0,
        'ignore_patterns': ['*.ignoreme'],
    }

    def setUp(self):
        self.umask = 0o027
        self.old_umask = os.umask(self.umask)
        super().setUp()

    def tearDown(self):
        os.umask(self.old_umask)
        super().tearDown()

    # Don't run collectstatic command in this test class.
    def run_collectstatic(self, **kwargs):
        pass
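

def _demo_manifest_storage_url():  # pragma: no cover
    """
    Illustrative sketch, not exercised by the suite: resolving a hashed URL
    through the configured storage, which is what the {% static %} template
    tag does when STATICFILES_STORAGE points at ManifestStaticFilesStorage.
    Assumes collectstatic has already written staticfiles.json.
    """
    from django.contrib.staticfiles.storage import staticfiles_storage

    # Looks up 'cached/styles.css' in the manifest and returns something like
    # '/static/cached/styles.5e0040571e1a.css'; with manifest_strict (the
    # default) a missing entry raises ValueError, as test_missing_entry shows.
    return staticfiles_storage.url('cached/styles.css')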
4ea919c0bf8ab3c007006e29ce1b360cc5289c78e68e86e68ab939a9f87a7b43
import datetime import sys import unittest from unittest import mock from urllib.parse import quote_plus from django.test import SimpleTestCase from django.utils.encoding import ( DjangoUnicodeDecodeError, escape_uri_path, filepath_to_uri, force_bytes, force_str, get_system_encoding, iri_to_uri, repercent_broken_unicode, smart_bytes, smart_str, uri_to_iri, ) from django.utils.functional import SimpleLazyObject from django.utils.translation import gettext_lazy class TestEncodingUtils(SimpleTestCase): def test_force_str_exception(self): """ Broken __str__ actually raises an error. """ class MyString: def __str__(self): return b'\xc3\xb6\xc3\xa4\xc3\xbc' # str(s) raises a TypeError if the result is not a text type. with self.assertRaises(TypeError): force_str(MyString()) def test_force_str_lazy(self): s = SimpleLazyObject(lambda: 'x') self.assertIs(type(force_str(s)), str) def test_force_str_DjangoUnicodeDecodeError(self): msg = ( "'utf-8' codec can't decode byte 0xff in position 0: invalid " "start byte. You passed in b'\\xff' (<class 'bytes'>)" ) with self.assertRaisesMessage(DjangoUnicodeDecodeError, msg): force_str(b'\xff') def test_force_bytes_exception(self): """ force_bytes knows how to convert to bytes an exception containing non-ASCII characters in its args. """ error_msg = "This is an exception, voilà" exc = ValueError(error_msg) self.assertEqual(force_bytes(exc), error_msg.encode()) self.assertEqual(force_bytes(exc, encoding='ascii', errors='ignore'), b'This is an exception, voil') def test_force_bytes_strings_only(self): today = datetime.date.today() self.assertEqual(force_bytes(today, strings_only=True), today) def test_force_bytes_encoding(self): error_msg = 'This is an exception, voilà'.encode() result = force_bytes(error_msg, encoding='ascii', errors='ignore') self.assertEqual(result, b'This is an exception, voil') def test_force_bytes_memory_view(self): data = b'abc' result = force_bytes(memoryview(data)) # Type check is needed because memoryview(bytes) == bytes. self.assertIs(type(result), bytes) self.assertEqual(result, data) def test_smart_bytes(self): class Test: def __str__(self): return 'ŠĐĆŽćžšđ' lazy_func = gettext_lazy('x') self.assertIs(smart_bytes(lazy_func), lazy_func) self.assertEqual(smart_bytes(Test()), b'\xc5\xa0\xc4\x90\xc4\x86\xc5\xbd\xc4\x87\xc5\xbe\xc5\xa1\xc4\x91') self.assertEqual(smart_bytes(1), b'1') self.assertEqual(smart_bytes('foo'), b'foo') def test_smart_str(self): class Test: def __str__(self): return 'ŠĐĆŽćžšđ' lazy_func = gettext_lazy('x') self.assertIs(smart_str(lazy_func), lazy_func) self.assertEqual(smart_str(Test()), '\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111') self.assertEqual(smart_str(1), '1') self.assertEqual(smart_str('foo'), 'foo') def test_get_default_encoding(self): with mock.patch('locale.getdefaultlocale', side_effect=Exception): self.assertEqual(get_system_encoding(), 'ascii') def test_repercent_broken_unicode_recursion_error(self): # Prepare a string long enough to force a recursion error if the tested # function uses recursion. 
data = b'\xfc' * sys.getrecursionlimit() try: self.assertEqual(repercent_broken_unicode(data), b'%FC' * sys.getrecursionlimit()) except RecursionError: self.fail('Unexpected RecursionError raised.') class TestRFC3987IEncodingUtils(unittest.TestCase): def test_filepath_to_uri(self): self.assertEqual(filepath_to_uri(None), None) self.assertEqual(filepath_to_uri('upload\\чубака.mp4'), 'upload/%D1%87%D1%83%D0%B1%D0%B0%D0%BA%D0%B0.mp4') def test_iri_to_uri(self): cases = [ # Valid UTF-8 sequences are encoded. ('red%09rosé#red', 'red%09ros%C3%A9#red'), ('/blog/for/Jürgen Münster/', '/blog/for/J%C3%BCrgen%20M%C3%BCnster/'), ('locations/%s' % quote_plus('Paris & Orléans'), 'locations/Paris+%26+Orl%C3%A9ans'), # Reserved chars remain unescaped. ('%&', '%&'), ('red&♥ros%#red', 'red&%E2%99%A5ros%#red'), (gettext_lazy('red&♥ros%#red'), 'red&%E2%99%A5ros%#red'), ] for iri, uri in cases: self.assertEqual(iri_to_uri(iri), uri) # Test idempotency. self.assertEqual(iri_to_uri(iri_to_uri(iri)), uri) def test_uri_to_iri(self): cases = [ (None, None), # Valid UTF-8 sequences are decoded. ('/%e2%89%Ab%E2%99%a5%E2%89%aB/', '/≫♥≫/'), ('/%E2%99%A5%E2%99%A5/?utf8=%E2%9C%93', '/♥♥/?utf8=✓'), ('/%41%5a%6B/', '/AZk/'), # Reserved and non-URL valid ASCII chars are not decoded. ('/%25%20%02%41%7b/', '/%25%20%02A%7b/'), # Broken UTF-8 sequences remain escaped. ('/%AAd%AAj%AAa%AAn%AAg%AAo%AA/', '/%AAd%AAj%AAa%AAn%AAg%AAo%AA/'), ('/%E2%99%A5%E2%E2%99%A5/', '/♥%E2♥/'), ('/%E2%99%A5%E2%99%E2%99%A5/', '/♥%E2%99♥/'), ('/%E2%E2%99%A5%E2%99%A5%99/', '/%E2♥♥%99/'), ('/%E2%99%A5%E2%99%A5/?utf8=%9C%93%E2%9C%93%9C%93', '/♥♥/?utf8=%9C%93✓%9C%93'), ] for uri, iri in cases: self.assertEqual(uri_to_iri(uri), iri) # Test idempotency. self.assertEqual(uri_to_iri(uri_to_iri(uri)), iri) def test_complementarity(self): cases = [ ('/blog/for/J%C3%BCrgen%20M%C3%BCnster/', '/blog/for/J\xfcrgen%20M\xfcnster/'), ('%&', '%&'), ('red&%E2%99%A5ros%#red', 'red&♥ros%#red'), ('/%E2%99%A5%E2%99%A5/', '/♥♥/'), ('/%E2%99%A5%E2%99%A5/?utf8=%E2%9C%93', '/♥♥/?utf8=✓'), ('/%25%20%02%7b/', '/%25%20%02%7b/'), ('/%AAd%AAj%AAa%AAn%AAg%AAo%AA/', '/%AAd%AAj%AAa%AAn%AAg%AAo%AA/'), ('/%E2%99%A5%E2%E2%99%A5/', '/♥%E2♥/'), ('/%E2%99%A5%E2%99%E2%99%A5/', '/♥%E2%99♥/'), ('/%E2%E2%99%A5%E2%99%A5%99/', '/%E2♥♥%99/'), ('/%E2%99%A5%E2%99%A5/?utf8=%9C%93%E2%9C%93%9C%93', '/♥♥/?utf8=%9C%93✓%9C%93'), ] for uri, iri in cases: self.assertEqual(iri_to_uri(uri_to_iri(uri)), uri) self.assertEqual(uri_to_iri(iri_to_uri(iri)), iri) def test_escape_uri_path(self): self.assertEqual( escape_uri_path('/;some/=awful/?path/:with/@lots/&of/+awful/chars'), '/%3Bsome/%3Dawful/%3Fpath/:with/@lots/&of/+awful/chars' ) self.assertEqual(escape_uri_path('/foo#bar'), '/foo%23bar') self.assertEqual(escape_uri_path('/foo?bar'), '/foo%3Fbar')
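

# A short standalone illustration of the UTF-8 percent-encoding that the
# iri_to_uri()/uri_to_iri() cases above depend on: non-ASCII characters are
# encoded byte-by-byte from their UTF-8 form, so 'é' (bytes 0xC3 0xA9)
# round-trips through '%C3%A9'. urllib is used here only to demonstrate the
# byte-level rule; iri_to_uri() additionally leaves reserved characters such
# as '&' and '%' untouched, as the test cases show.
from urllib.parse import quote, unquote

assert 'é'.encode() == b'\xc3\xa9'
assert quote('é', safe='') == '%C3%A9'
assert unquote('%C3%A9') == 'é'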
ed84f3b47873428eb0d88feb6637744bb6ba818c3d700f2c7337c4211b1b8ddf
import contextlib import os import py_compile import shutil import sys import tempfile import threading import time import types import weakref import zipfile from importlib import import_module from pathlib import Path from unittest import mock, skip, skipIf from django.apps.registry import Apps from django.test import SimpleTestCase from django.test.utils import extend_sys_path from django.utils import autoreload from django.utils.autoreload import WatchmanUnavailable from .utils import on_macos_with_hfs class TestIterModulesAndFiles(SimpleTestCase): def import_and_cleanup(self, name): import_module(name) self.addCleanup(lambda: sys.path_importer_cache.clear()) self.addCleanup(lambda: sys.modules.pop(name, None)) def clear_autoreload_caches(self): autoreload.iter_modules_and_files.cache_clear() def assertFileFound(self, filename): # Some temp directories are symlinks. Python resolves these fully while # importing. resolved_filename = filename.resolve() self.clear_autoreload_caches() # Test uncached access self.assertIn(resolved_filename, list(autoreload.iter_all_python_module_files())) # Test cached access self.assertIn(resolved_filename, list(autoreload.iter_all_python_module_files())) self.assertEqual(autoreload.iter_modules_and_files.cache_info().hits, 1) def assertFileNotFound(self, filename): resolved_filename = filename.resolve() self.clear_autoreload_caches() # Test uncached access self.assertNotIn(resolved_filename, list(autoreload.iter_all_python_module_files())) # Test cached access self.assertNotIn(resolved_filename, list(autoreload.iter_all_python_module_files())) self.assertEqual(autoreload.iter_modules_and_files.cache_info().hits, 1) def temporary_file(self, filename): dirname = tempfile.mkdtemp() self.addCleanup(shutil.rmtree, dirname) return Path(dirname) / filename def test_paths_are_pathlib_instances(self): for filename in autoreload.iter_all_python_module_files(): self.assertIsInstance(filename, Path) def test_file_added(self): """ When a file is added, it's returned by iter_all_python_module_files(). """ filename = self.temporary_file('test_deleted_removed_module.py') filename.touch() with extend_sys_path(str(filename.parent)): self.import_and_cleanup('test_deleted_removed_module') self.assertFileFound(filename.absolute()) def test_check_errors(self): """ When a file containing an error is imported in a function wrapped by check_errors(), gen_filenames() returns it. """ filename = self.temporary_file('test_syntax_error.py') filename.write_text("Ceci n'est pas du Python.") with extend_sys_path(str(filename.parent)): with self.assertRaises(SyntaxError): autoreload.check_errors(import_module)('test_syntax_error') self.assertFileFound(filename) def test_check_errors_catches_all_exceptions(self): """ Since Python may raise arbitrary exceptions when importing code, check_errors() must catch Exception, not just some subclasses. """ filename = self.temporary_file('test_exception.py') filename.write_text('raise Exception') with extend_sys_path(str(filename.parent)): with self.assertRaises(Exception): autoreload.check_errors(import_module)('test_exception') self.assertFileFound(filename) def test_zip_reload(self): """ Modules imported from zipped files have their archive location included in the result. 
""" zip_file = self.temporary_file('zip_import.zip') with zipfile.ZipFile(str(zip_file), 'w', zipfile.ZIP_DEFLATED) as zipf: zipf.writestr('test_zipped_file.py', '') with extend_sys_path(str(zip_file)): self.import_and_cleanup('test_zipped_file') self.assertFileFound(zip_file) def test_bytecode_conversion_to_source(self): """.pyc and .pyo files are included in the files list.""" filename = self.temporary_file('test_compiled.py') filename.touch() compiled_file = Path(py_compile.compile(str(filename), str(filename.with_suffix('.pyc')))) filename.unlink() with extend_sys_path(str(compiled_file.parent)): self.import_and_cleanup('test_compiled') self.assertFileFound(compiled_file) def test_weakref_in_sys_module(self): """iter_all_python_module_file() ignores weakref modules.""" time_proxy = weakref.proxy(time) sys.modules['time_proxy'] = time_proxy self.addCleanup(lambda: sys.modules.pop('time_proxy', None)) list(autoreload.iter_all_python_module_files()) # No crash. def test_module_without_spec(self): module = types.ModuleType('test_module') del module.__spec__ self.assertEqual(autoreload.iter_modules_and_files((module,), frozenset()), frozenset()) def test_main_module_is_resolved(self): main_module = sys.modules['__main__'] self.assertFileFound(Path(main_module.__file__)) def test_main_module_without_file_is_not_resolved(self): fake_main = types.ModuleType('__main__') self.assertEqual(autoreload.iter_modules_and_files((fake_main,), frozenset()), frozenset()) def test_path_with_embedded_null_bytes(self): for path in ( 'embedded_null_byte\x00.py', 'di\x00rectory/embedded_null_byte.py', ): with self.subTest(path=path): self.assertEqual( autoreload.iter_modules_and_files((), frozenset([path])), frozenset(), ) class TestCommonRoots(SimpleTestCase): def test_common_roots(self): paths = ( Path('/first/second'), Path('/first/second/third'), Path('/first/'), Path('/root/first/'), ) results = autoreload.common_roots(paths) self.assertCountEqual(results, [Path('/first/'), Path('/root/first/')]) class TestSysPathDirectories(SimpleTestCase): def setUp(self): self._directory = tempfile.TemporaryDirectory() self.directory = Path(self._directory.name).resolve().absolute() self.file = self.directory / 'test' self.file.touch() def tearDown(self): self._directory.cleanup() def test_sys_paths_with_directories(self): with extend_sys_path(str(self.file)): paths = list(autoreload.sys_path_directories()) self.assertIn(self.file.parent, paths) def test_sys_paths_non_existing(self): nonexistent_file = Path(self.directory.name) / 'does_not_exist' with extend_sys_path(str(nonexistent_file)): paths = list(autoreload.sys_path_directories()) self.assertNotIn(nonexistent_file, paths) self.assertNotIn(nonexistent_file.parent, paths) def test_sys_paths_absolute(self): paths = list(autoreload.sys_path_directories()) self.assertTrue(all(p.is_absolute() for p in paths)) def test_sys_paths_directories(self): with extend_sys_path(str(self.directory)): paths = list(autoreload.sys_path_directories()) self.assertIn(self.directory, paths) class GetReloaderTests(SimpleTestCase): @mock.patch('django.utils.autoreload.WatchmanReloader') def test_watchman_unavailable(self, mocked_watchman): mocked_watchman.check_availability.side_effect = WatchmanUnavailable self.assertIsInstance(autoreload.get_reloader(), autoreload.StatReloader) @mock.patch.object(autoreload.WatchmanReloader, 'check_availability') def test_watchman_available(self, mocked_available): # If WatchmanUnavailable isn't raised, Watchman will be chosen. 
mocked_available.return_value = None result = autoreload.get_reloader() self.assertIsInstance(result, autoreload.WatchmanReloader) class RunWithReloaderTests(SimpleTestCase): @mock.patch.dict(os.environ, {autoreload.DJANGO_AUTORELOAD_ENV: 'true'}) @mock.patch('django.utils.autoreload.get_reloader') def test_swallows_keyboard_interrupt(self, mocked_get_reloader): mocked_get_reloader.side_effect = KeyboardInterrupt() autoreload.run_with_reloader(lambda: None) # No exception @mock.patch.dict(os.environ, {autoreload.DJANGO_AUTORELOAD_ENV: 'false'}) @mock.patch('django.utils.autoreload.restart_with_reloader') def test_calls_sys_exit(self, mocked_restart_reloader): mocked_restart_reloader.return_value = 1 with self.assertRaises(SystemExit) as exc: autoreload.run_with_reloader(lambda: None) self.assertEqual(exc.exception.code, 1) @mock.patch.dict(os.environ, {autoreload.DJANGO_AUTORELOAD_ENV: 'true'}) @mock.patch('django.utils.autoreload.start_django') @mock.patch('django.utils.autoreload.get_reloader') def test_calls_start_django(self, mocked_reloader, mocked_start_django): mocked_reloader.return_value = mock.sentinel.RELOADER autoreload.run_with_reloader(mock.sentinel.METHOD) self.assertEqual(mocked_start_django.call_count, 1) self.assertSequenceEqual( mocked_start_django.call_args[0], [mock.sentinel.RELOADER, mock.sentinel.METHOD] ) class StartDjangoTests(SimpleTestCase): @mock.patch('django.utils.autoreload.StatReloader') def test_watchman_becomes_unavailable(self, mocked_stat): mocked_stat.should_stop.return_value = True fake_reloader = mock.MagicMock() fake_reloader.should_stop = False fake_reloader.run.side_effect = autoreload.WatchmanUnavailable() autoreload.start_django(fake_reloader, lambda: None) self.assertEqual(mocked_stat.call_count, 1) @mock.patch('django.utils.autoreload.ensure_echo_on') def test_echo_on_called(self, mocked_echo): fake_reloader = mock.MagicMock() autoreload.start_django(fake_reloader, lambda: None) self.assertEqual(mocked_echo.call_count, 1) @mock.patch('django.utils.autoreload.check_errors') def test_check_errors_called(self, mocked_check_errors): fake_method = mock.MagicMock(return_value=None) fake_reloader = mock.MagicMock() autoreload.start_django(fake_reloader, fake_method) self.assertCountEqual(mocked_check_errors.call_args[0], [fake_method]) @mock.patch('threading.Thread') @mock.patch('django.utils.autoreload.check_errors') def test_starts_thread_with_args(self, mocked_check_errors, mocked_thread): fake_reloader = mock.MagicMock() fake_main_func = mock.MagicMock() fake_thread = mock.MagicMock() mocked_check_errors.return_value = fake_main_func mocked_thread.return_value = fake_thread autoreload.start_django(fake_reloader, fake_main_func, 123, abc=123) self.assertEqual(mocked_thread.call_count, 1) self.assertEqual( mocked_thread.call_args[1], {'target': fake_main_func, 'args': (123,), 'kwargs': {'abc': 123}, 'name': 'django-main-thread'} ) self.assertSequenceEqual(fake_thread.setDaemon.call_args[0], [True]) self.assertTrue(fake_thread.start.called) class TestCheckErrors(SimpleTestCase): def test_mutates_error_files(self): fake_method = mock.MagicMock(side_effect=RuntimeError()) wrapped = autoreload.check_errors(fake_method) with mock.patch.object(autoreload, '_error_files') as mocked_error_files: with self.assertRaises(RuntimeError): wrapped() self.assertEqual(mocked_error_files.append.call_count, 1) class TestRaiseLastException(SimpleTestCase): @mock.patch('django.utils.autoreload._exception', None) def test_no_exception(self): # Should raise no exception 
if _exception is None autoreload.raise_last_exception() def test_raises_exception(self): class MyException(Exception): pass # Create an exception try: raise MyException('Test Message') except MyException: exc_info = sys.exc_info() with mock.patch('django.utils.autoreload._exception', exc_info): with self.assertRaisesMessage(MyException, 'Test Message'): autoreload.raise_last_exception() def test_raises_custom_exception(self): class MyException(Exception): def __init__(self, msg, extra_context): super().__init__(msg) self.extra_context = extra_context # Create an exception. try: raise MyException('Test Message', 'extra context') except MyException: exc_info = sys.exc_info() with mock.patch('django.utils.autoreload._exception', exc_info): with self.assertRaisesMessage(MyException, 'Test Message'): autoreload.raise_last_exception() def test_raises_exception_with_context(self): try: raise Exception(2) except Exception as e: try: raise Exception(1) from e except Exception: exc_info = sys.exc_info() with mock.patch('django.utils.autoreload._exception', exc_info): with self.assertRaises(Exception) as cm: autoreload.raise_last_exception() self.assertEqual(cm.exception.args[0], 1) self.assertEqual(cm.exception.__cause__.args[0], 2) class RestartWithReloaderTests(SimpleTestCase): executable = '/usr/bin/python' def patch_autoreload(self, argv): patch_call = mock.patch('django.utils.autoreload.subprocess.call', return_value=0) patches = [ mock.patch('django.utils.autoreload.sys.argv', argv), mock.patch('django.utils.autoreload.sys.executable', self.executable), mock.patch('django.utils.autoreload.sys.warnoptions', ['all']), ] for p in patches: p.start() self.addCleanup(p.stop) mock_call = patch_call.start() self.addCleanup(patch_call.stop) return mock_call def test_manage_py(self): argv = ['./manage.py', 'runserver'] mock_call = self.patch_autoreload(argv) autoreload.restart_with_reloader() self.assertEqual(mock_call.call_count, 1) self.assertEqual(mock_call.call_args[0][0], [self.executable, '-Wall'] + argv) def test_python_m_django(self): main = '/usr/lib/pythonX.Y/site-packages/django/__main__.py' argv = [main, 'runserver'] mock_call = self.patch_autoreload(argv) with mock.patch('django.__main__.__file__', main): autoreload.restart_with_reloader() self.assertEqual(mock_call.call_count, 1) self.assertEqual(mock_call.call_args[0][0], [self.executable, '-Wall', '-m', 'django'] + argv[1:]) class ReloaderTests(SimpleTestCase): RELOADER_CLS = None def setUp(self): self._tempdir = tempfile.TemporaryDirectory() self.tempdir = Path(self._tempdir.name).resolve().absolute() self.existing_file = self.ensure_file(self.tempdir / 'test.py') self.nonexistent_file = (self.tempdir / 'does_not_exist.py').absolute() self.reloader = self.RELOADER_CLS() def tearDown(self): self._tempdir.cleanup() self.reloader.stop() def ensure_file(self, path): path.parent.mkdir(exist_ok=True, parents=True) path.touch() # On Linux and Windows updating the mtime of a file using touch() will set a timestamp # value that is in the past, as the time value for the last kernel tick is used rather # than getting the correct absolute time. # To make testing simpler set the mtime to be the observed time when this function is # called. 
self.set_mtime(path, time.time()) return path.absolute() def set_mtime(self, fp, value): os.utime(str(fp), (value, value)) def increment_mtime(self, fp, by=1): current_time = time.time() self.set_mtime(fp, current_time + by) @contextlib.contextmanager def tick_twice(self): ticker = self.reloader.tick() next(ticker) yield next(ticker) class IntegrationTests: @mock.patch('django.utils.autoreload.BaseReloader.notify_file_changed') @mock.patch('django.utils.autoreload.iter_all_python_module_files', return_value=frozenset()) def test_glob(self, mocked_modules, notify_mock): non_py_file = self.ensure_file(self.tempdir / 'non_py_file') self.reloader.watch_dir(self.tempdir, '*.py') with self.tick_twice(): self.increment_mtime(non_py_file) self.increment_mtime(self.existing_file) self.assertEqual(notify_mock.call_count, 1) self.assertCountEqual(notify_mock.call_args[0], [self.existing_file]) @mock.patch('django.utils.autoreload.BaseReloader.notify_file_changed') @mock.patch('django.utils.autoreload.iter_all_python_module_files', return_value=frozenset()) def test_multiple_globs(self, mocked_modules, notify_mock): self.ensure_file(self.tempdir / 'x.test') self.reloader.watch_dir(self.tempdir, '*.py') self.reloader.watch_dir(self.tempdir, '*.test') with self.tick_twice(): self.increment_mtime(self.existing_file) self.assertEqual(notify_mock.call_count, 1) self.assertCountEqual(notify_mock.call_args[0], [self.existing_file]) @mock.patch('django.utils.autoreload.BaseReloader.notify_file_changed') @mock.patch('django.utils.autoreload.iter_all_python_module_files', return_value=frozenset()) def test_overlapping_globs(self, mocked_modules, notify_mock): self.reloader.watch_dir(self.tempdir, '*.py') self.reloader.watch_dir(self.tempdir, '*.p*') with self.tick_twice(): self.increment_mtime(self.existing_file) self.assertEqual(notify_mock.call_count, 1) self.assertCountEqual(notify_mock.call_args[0], [self.existing_file]) @mock.patch('django.utils.autoreload.BaseReloader.notify_file_changed') @mock.patch('django.utils.autoreload.iter_all_python_module_files', return_value=frozenset()) def test_glob_recursive(self, mocked_modules, notify_mock): non_py_file = self.ensure_file(self.tempdir / 'dir' / 'non_py_file') py_file = self.ensure_file(self.tempdir / 'dir' / 'file.py') self.reloader.watch_dir(self.tempdir, '**/*.py') with self.tick_twice(): self.increment_mtime(non_py_file) self.increment_mtime(py_file) self.assertEqual(notify_mock.call_count, 1) self.assertCountEqual(notify_mock.call_args[0], [py_file]) @mock.patch('django.utils.autoreload.BaseReloader.notify_file_changed') @mock.patch('django.utils.autoreload.iter_all_python_module_files', return_value=frozenset()) def test_multiple_recursive_globs(self, mocked_modules, notify_mock): non_py_file = self.ensure_file(self.tempdir / 'dir' / 'test.txt') py_file = self.ensure_file(self.tempdir / 'dir' / 'file.py') self.reloader.watch_dir(self.tempdir, '**/*.txt') self.reloader.watch_dir(self.tempdir, '**/*.py') with self.tick_twice(): self.increment_mtime(non_py_file) self.increment_mtime(py_file) self.assertEqual(notify_mock.call_count, 2) self.assertCountEqual(notify_mock.call_args_list, [mock.call(py_file), mock.call(non_py_file)]) @mock.patch('django.utils.autoreload.BaseReloader.notify_file_changed') @mock.patch('django.utils.autoreload.iter_all_python_module_files', return_value=frozenset()) def test_nested_glob_recursive(self, mocked_modules, notify_mock): inner_py_file = self.ensure_file(self.tempdir / 'dir' / 'file.py') 
self.reloader.watch_dir(self.tempdir, '**/*.py') self.reloader.watch_dir(inner_py_file.parent, '**/*.py') with self.tick_twice(): self.increment_mtime(inner_py_file) self.assertEqual(notify_mock.call_count, 1) self.assertCountEqual(notify_mock.call_args[0], [inner_py_file]) @mock.patch('django.utils.autoreload.BaseReloader.notify_file_changed') @mock.patch('django.utils.autoreload.iter_all_python_module_files', return_value=frozenset()) def test_overlapping_glob_recursive(self, mocked_modules, notify_mock): py_file = self.ensure_file(self.tempdir / 'dir' / 'file.py') self.reloader.watch_dir(self.tempdir, '**/*.p*') self.reloader.watch_dir(self.tempdir, '**/*.py*') with self.tick_twice(): self.increment_mtime(py_file) self.assertEqual(notify_mock.call_count, 1) self.assertCountEqual(notify_mock.call_args[0], [py_file]) class BaseReloaderTests(ReloaderTests): RELOADER_CLS = autoreload.BaseReloader def test_watch_dir_with_unresolvable_path(self): path = Path('unresolvable_directory') with mock.patch.object(Path, 'absolute', side_effect=FileNotFoundError): self.reloader.watch_dir(path, '**/*.mo') self.assertEqual(list(self.reloader.directory_globs), []) def test_watch_with_glob(self): self.reloader.watch_dir(self.tempdir, '*.py') watched_files = list(self.reloader.watched_files()) self.assertIn(self.existing_file, watched_files) def test_watch_files_with_recursive_glob(self): inner_file = self.ensure_file(self.tempdir / 'test' / 'test.py') self.reloader.watch_dir(self.tempdir, '**/*.py') watched_files = list(self.reloader.watched_files()) self.assertIn(self.existing_file, watched_files) self.assertIn(inner_file, watched_files) def test_run_loop_catches_stopiteration(self): def mocked_tick(): yield with mock.patch.object(self.reloader, 'tick', side_effect=mocked_tick) as tick: self.reloader.run_loop() self.assertEqual(tick.call_count, 1) def test_run_loop_stop_and_return(self): def mocked_tick(*args): yield self.reloader.stop() return # Raises StopIteration with mock.patch.object(self.reloader, 'tick', side_effect=mocked_tick) as tick: self.reloader.run_loop() self.assertEqual(tick.call_count, 1) def test_wait_for_apps_ready_checks_for_exception(self): app_reg = Apps() app_reg.ready_event.set() # thread.is_alive() is False if it's not started. dead_thread = threading.Thread() self.assertFalse(self.reloader.wait_for_apps_ready(app_reg, dead_thread)) def test_wait_for_apps_ready_without_exception(self): app_reg = Apps() app_reg.ready_event.set() thread = mock.MagicMock() thread.is_alive.return_value = True self.assertTrue(self.reloader.wait_for_apps_ready(app_reg, thread)) def skip_unless_watchman_available(): try: autoreload.WatchmanReloader.check_availability() except WatchmanUnavailable as e: return skip('Watchman unavailable: %s' % e) return lambda func: func @skip_unless_watchman_available() class WatchmanReloaderTests(ReloaderTests, IntegrationTests): RELOADER_CLS = autoreload.WatchmanReloader def setUp(self): super().setUp() # Shorten the timeout to speed up tests. 
self.reloader.client_timeout = 0.1 def test_watch_glob_ignores_non_existing_directories_two_levels(self): with mock.patch.object(self.reloader, '_subscribe') as mocked_subscribe: self.reloader._watch_glob(self.tempdir / 'does_not_exist' / 'more', ['*']) self.assertFalse(mocked_subscribe.called) def test_watch_glob_uses_existing_parent_directories(self): with mock.patch.object(self.reloader, '_subscribe') as mocked_subscribe: self.reloader._watch_glob(self.tempdir / 'does_not_exist', ['*']) self.assertSequenceEqual( mocked_subscribe.call_args[0], [ self.tempdir, 'glob-parent-does_not_exist:%s' % self.tempdir, ['anyof', ['match', 'does_not_exist/*', 'wholename']] ] ) def test_watch_glob_multiple_patterns(self): with mock.patch.object(self.reloader, '_subscribe') as mocked_subscribe: self.reloader._watch_glob(self.tempdir, ['*', '*.py']) self.assertSequenceEqual( mocked_subscribe.call_args[0], [ self.tempdir, 'glob:%s' % self.tempdir, ['anyof', ['match', '*', 'wholename'], ['match', '*.py', 'wholename']] ] ) def test_watched_roots_contains_files(self): paths = self.reloader.watched_roots([self.existing_file]) self.assertIn(self.existing_file.parent, paths) def test_watched_roots_contains_directory_globs(self): self.reloader.watch_dir(self.tempdir, '*.py') paths = self.reloader.watched_roots([]) self.assertIn(self.tempdir, paths) def test_watched_roots_contains_sys_path(self): with extend_sys_path(str(self.tempdir)): paths = self.reloader.watched_roots([]) self.assertIn(self.tempdir, paths) def test_check_server_status(self): self.assertTrue(self.reloader.check_server_status()) def test_check_server_status_raises_error(self): with mock.patch.object(self.reloader.client, 'query') as mocked_query: mocked_query.side_effect = Exception() with self.assertRaises(autoreload.WatchmanUnavailable): self.reloader.check_server_status() @mock.patch('pywatchman.client') def test_check_availability(self, mocked_client): mocked_client().capabilityCheck.side_effect = Exception() with self.assertRaisesMessage(WatchmanUnavailable, 'Cannot connect to the watchman service'): self.RELOADER_CLS.check_availability() @mock.patch('pywatchman.client') def test_check_availability_lower_version(self, mocked_client): mocked_client().capabilityCheck.return_value = {'version': '4.8.10'} with self.assertRaisesMessage(WatchmanUnavailable, 'Watchman 4.9 or later is required.'): self.RELOADER_CLS.check_availability() def test_pywatchman_not_available(self): with mock.patch.object(autoreload, 'pywatchman') as mocked: mocked.__bool__.return_value = False with self.assertRaisesMessage(WatchmanUnavailable, 'pywatchman not installed.'): self.RELOADER_CLS.check_availability() def test_update_watches_raises_exceptions(self): class TestException(Exception): pass with mock.patch.object(self.reloader, '_update_watches') as mocked_watches: with mock.patch.object(self.reloader, 'check_server_status') as mocked_server_status: mocked_watches.side_effect = TestException() mocked_server_status.return_value = True with self.assertRaises(TestException): self.reloader.update_watches() self.assertIsInstance(mocked_server_status.call_args[0][0], TestException) @mock.patch.dict(os.environ, {'DJANGO_WATCHMAN_TIMEOUT': '10'}) def test_setting_timeout_from_environment_variable(self): self.assertEqual(self.RELOADER_CLS.client_timeout, 10) @skipIf(on_macos_with_hfs(), "These tests do not work with HFS+ as a filesystem") class StatReloaderTests(ReloaderTests, IntegrationTests): RELOADER_CLS = autoreload.StatReloader def setUp(self): super().setUp() # 
Shorten the sleep time to speed up tests. self.reloader.SLEEP_TIME = 0.01 @mock.patch('django.utils.autoreload.StatReloader.notify_file_changed') def test_tick_does_not_trigger_twice(self, mock_notify_file_changed): with mock.patch.object(self.reloader, 'watched_files', return_value=[self.existing_file]): ticker = self.reloader.tick() next(ticker) self.increment_mtime(self.existing_file) next(ticker) next(ticker) self.assertEqual(mock_notify_file_changed.call_count, 1) def test_snapshot_files_ignores_missing_files(self): with mock.patch.object(self.reloader, 'watched_files', return_value=[self.nonexistent_file]): self.assertEqual(dict(self.reloader.snapshot_files()), {}) def test_snapshot_files_updates(self): with mock.patch.object(self.reloader, 'watched_files', return_value=[self.existing_file]): snapshot1 = dict(self.reloader.snapshot_files()) self.assertIn(self.existing_file, snapshot1) self.increment_mtime(self.existing_file) snapshot2 = dict(self.reloader.snapshot_files()) self.assertNotEqual(snapshot1[self.existing_file], snapshot2[self.existing_file]) def test_snapshot_files_with_duplicates(self): with mock.patch.object(self.reloader, 'watched_files', return_value=[self.existing_file, self.existing_file]): snapshot = list(self.reloader.snapshot_files()) self.assertEqual(len(snapshot), 1) self.assertEqual(snapshot[0][0], self.existing_file)
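

# A minimal sketch of the stat-based change detection that StatReloaderTests
# exercises above: snapshot each watched file's mtime, then report files
# whose mtime advanced since the previous snapshot. The function below is
# illustrative only; the real logic lives in
# django.utils.autoreload.StatReloader.tick()/snapshot_files().
def _changed_files_sketch(watched_files, previous_mtimes):
    """Yield Path objects modified since previous_mtimes, updating it in place."""
    for path in watched_files:
        try:
            mtime = path.stat().st_mtime
        except OSError:
            # Missing files are skipped, as in snapshot_files().
            continue
        old_mtime = previous_mtimes.get(path)
        previous_mtimes[path] = mtime
        if old_mtime is not None and mtime > old_mtime:
            yield path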
2906e807fb6fbeaac90648a1cb3e8a3558021efc72c50d44a94c512d95b46ec4
import unittest from datetime import datetime from django.test import SimpleTestCase, ignore_warnings from django.utils.datastructures import MultiValueDict from django.utils.deprecation import RemovedInDjango40Warning from django.utils.http import ( base36_to_int, escape_leading_slashes, http_date, int_to_base36, is_safe_url, is_same_domain, parse_etags, parse_http_date, quote_etag, urlencode, urlquote, urlquote_plus, urlsafe_base64_decode, urlsafe_base64_encode, urlunquote, urlunquote_plus, ) class URLEncodeTests(SimpleTestCase): cannot_encode_none_msg = ( "Cannot encode None for key 'a' in a query string. Did you mean to " "pass an empty string or omit the value?" ) def test_tuples(self): self.assertEqual(urlencode((('a', 1), ('b', 2), ('c', 3))), 'a=1&b=2&c=3') def test_dict(self): result = urlencode({'a': 1, 'b': 2, 'c': 3}) # Dictionaries are treated as unordered. self.assertIn(result, [ 'a=1&b=2&c=3', 'a=1&c=3&b=2', 'b=2&a=1&c=3', 'b=2&c=3&a=1', 'c=3&a=1&b=2', 'c=3&b=2&a=1', ]) def test_dict_containing_sequence_not_doseq(self): self.assertEqual(urlencode({'a': [1, 2]}, doseq=False), 'a=%5B1%2C+2%5D') def test_dict_containing_tuple_not_doseq(self): self.assertEqual(urlencode({'a': (1, 2)}, doseq=False), 'a=%281%2C+2%29') def test_custom_iterable_not_doseq(self): class IterableWithStr: def __str__(self): return 'custom' def __iter__(self): yield from range(0, 3) self.assertEqual(urlencode({'a': IterableWithStr()}, doseq=False), 'a=custom') def test_dict_containing_sequence_doseq(self): self.assertEqual(urlencode({'a': [1, 2]}, doseq=True), 'a=1&a=2') def test_dict_containing_empty_sequence_doseq(self): self.assertEqual(urlencode({'a': []}, doseq=True), '') def test_multivaluedict(self): result = urlencode(MultiValueDict({ 'name': ['Adrian', 'Simon'], 'position': ['Developer'], }), doseq=True) # MultiValueDicts are similarly unordered. 
self.assertIn(result, [ 'name=Adrian&name=Simon&position=Developer', 'position=Developer&name=Adrian&name=Simon', ]) def test_dict_with_bytes_values(self): self.assertEqual(urlencode({'a': b'abc'}, doseq=True), 'a=abc') def test_dict_with_sequence_of_bytes(self): self.assertEqual(urlencode({'a': [b'spam', b'eggs', b'bacon']}, doseq=True), 'a=spam&a=eggs&a=bacon') def test_dict_with_bytearray(self): self.assertEqual(urlencode({'a': bytearray(range(2))}, doseq=True), 'a=0&a=1') def test_generator(self): self.assertEqual(urlencode({'a': range(2)}, doseq=True), 'a=0&a=1') self.assertEqual(urlencode({'a': range(2)}, doseq=False), 'a=range%280%2C+2%29') def test_none(self): with self.assertRaisesMessage(TypeError, self.cannot_encode_none_msg): urlencode({'a': None}) def test_none_in_sequence(self): with self.assertRaisesMessage(TypeError, self.cannot_encode_none_msg): urlencode({'a': [None]}, doseq=True) def test_none_in_generator(self): def gen(): yield None with self.assertRaisesMessage(TypeError, self.cannot_encode_none_msg): urlencode({'a': gen()}, doseq=True) class Base36IntTests(SimpleTestCase): def test_roundtrip(self): for n in [0, 1, 1000, 1000000]: self.assertEqual(n, base36_to_int(int_to_base36(n))) def test_negative_input(self): with self.assertRaisesMessage(ValueError, 'Negative base36 conversion input.'): int_to_base36(-1) def test_to_base36_errors(self): for n in ['1', 'foo', {1: 2}, (1, 2, 3), 3.141]: with self.assertRaises(TypeError): int_to_base36(n) def test_invalid_literal(self): for n in ['#', ' ']: with self.assertRaisesMessage(ValueError, "invalid literal for int() with base 36: '%s'" % n): base36_to_int(n) def test_input_too_large(self): with self.assertRaisesMessage(ValueError, 'Base36 input too large'): base36_to_int('1' * 14) def test_to_int_errors(self): for n in [123, {1: 2}, (1, 2, 3), 3.141]: with self.assertRaises(TypeError): base36_to_int(n) def test_values(self): for n, b36 in [(0, '0'), (1, '1'), (42, '16'), (818469960, 'django')]: self.assertEqual(int_to_base36(n), b36) self.assertEqual(base36_to_int(b36), n) class IsSafeURLTests(unittest.TestCase): def test_bad_urls(self): bad_urls = ( 'http://example.com', 'http:///example.com', 'https://example.com', 'ftp://example.com', r'\\example.com', r'\\\example.com', r'/\\/example.com', r'\\\example.com', r'\\example.com', r'\\//example.com', r'/\/example.com', r'\/example.com', r'/\example.com', 'http:///example.com', r'http:/\//example.com', r'http:\/example.com', r'http:/\example.com', 'javascript:alert("XSS")', '\njavascript:alert(x)', '\x08//example.com', r'http://otherserver\@example.com', r'http:\\testserver\@example.com', r'http://testserver\me:[email protected]', r'http://testserver\@example.com', r'http:\\testserver\confirm\[email protected]', 'http:999999999', 'ftp:9999999999', '\n', 'http://[2001:cdba:0000:0000:0000:0000:3257:9652/', 'http://2001:cdba:0000:0000:0000:0000:3257:9652]/', ) for bad_url in bad_urls: with self.subTest(url=bad_url): self.assertIs(is_safe_url(bad_url, allowed_hosts={'testserver', 'testserver2'}), False) def test_good_urls(self): good_urls = ( '/view/?param=http://example.com', '/view/?param=https://example.com', '/view?param=ftp://example.com', 'view/?param=//example.com', 'https://testserver/', 'HTTPS://testserver/', '//testserver/', 'http://testserver/[email protected]', '/url%20with%20spaces/', 'path/http:2222222222', ) for good_url in good_urls: with self.subTest(url=good_url): self.assertIs(is_safe_url(good_url, allowed_hosts={'otherserver', 'testserver'}), True) def 
test_basic_auth(self): # Valid basic auth credentials are allowed. self.assertIs(is_safe_url(r'http://user:pass@testserver/', allowed_hosts={'user:pass@testserver'}), True) def test_no_allowed_hosts(self): # A path without host is allowed. self.assertIs(is_safe_url('/confirm/[email protected]', allowed_hosts=None), True) # Basic auth without host is not allowed. self.assertIs(is_safe_url(r'http://testserver\@example.com', allowed_hosts=None), False) def test_allowed_hosts_str(self): self.assertIs(is_safe_url('http://good.com/good', allowed_hosts='good.com'), True) self.assertIs(is_safe_url('http://good.co/evil', allowed_hosts='good.com'), False) def test_secure_param_https_urls(self): secure_urls = ( 'https://example.com/p', 'HTTPS://example.com/p', '/view/?param=http://example.com', ) for url in secure_urls: with self.subTest(url=url): self.assertIs(is_safe_url(url, allowed_hosts={'example.com'}, require_https=True), True) def test_secure_param_non_https_urls(self): insecure_urls = ( 'http://example.com/p', 'ftp://example.com/p', '//example.com/p', ) for url in insecure_urls: with self.subTest(url=url): self.assertIs(is_safe_url(url, allowed_hosts={'example.com'}, require_https=True), False) class URLSafeBase64Tests(unittest.TestCase): def test_roundtrip(self): bytestring = b'foo' encoded = urlsafe_base64_encode(bytestring) decoded = urlsafe_base64_decode(encoded) self.assertEqual(bytestring, decoded) @ignore_warnings(category=RemovedInDjango40Warning) class URLQuoteTests(unittest.TestCase): def test_quote(self): self.assertEqual(urlquote('Paris & Orl\xe9ans'), 'Paris%20%26%20Orl%C3%A9ans') self.assertEqual(urlquote('Paris & Orl\xe9ans', safe="&"), 'Paris%20&%20Orl%C3%A9ans') def test_unquote(self): self.assertEqual(urlunquote('Paris%20%26%20Orl%C3%A9ans'), 'Paris & Orl\xe9ans') self.assertEqual(urlunquote('Paris%20&%20Orl%C3%A9ans'), 'Paris & Orl\xe9ans') def test_quote_plus(self): self.assertEqual(urlquote_plus('Paris & Orl\xe9ans'), 'Paris+%26+Orl%C3%A9ans') self.assertEqual(urlquote_plus('Paris & Orl\xe9ans', safe="&"), 'Paris+&+Orl%C3%A9ans') def test_unquote_plus(self): self.assertEqual(urlunquote_plus('Paris+%26+Orl%C3%A9ans'), 'Paris & Orl\xe9ans') self.assertEqual(urlunquote_plus('Paris+&+Orl%C3%A9ans'), 'Paris & Orl\xe9ans') class IsSameDomainTests(unittest.TestCase): def test_good(self): for pair in ( ('example.com', 'example.com'), ('example.com', '.example.com'), ('foo.example.com', '.example.com'), ('example.com:8888', 'example.com:8888'), ('example.com:8888', '.example.com:8888'), ('foo.example.com:8888', '.example.com:8888'), ): self.assertIs(is_same_domain(*pair), True) def test_bad(self): for pair in ( ('example2.com', 'example.com'), ('foo.example.com', 'example.com'), ('example.com:9999', 'example.com:8888'), ('foo.example.com:8888', ''), ): self.assertIs(is_same_domain(*pair), False) class ETagProcessingTests(unittest.TestCase): def test_parsing(self): self.assertEqual( parse_etags(r'"" , "etag", "e\\tag", W/"weak"'), ['""', '"etag"', r'"e\\tag"', 'W/"weak"'] ) self.assertEqual(parse_etags('*'), ['*']) # Ignore RFC 2616 ETags that are invalid according to RFC 7232. 
self.assertEqual(parse_etags(r'"etag", "e\"t\"ag"'), ['"etag"']) def test_quoting(self): self.assertEqual(quote_etag('etag'), '"etag"') # unquoted self.assertEqual(quote_etag('"etag"'), '"etag"') # quoted self.assertEqual(quote_etag('W/"etag"'), 'W/"etag"') # quoted, weak class HttpDateProcessingTests(unittest.TestCase): def test_http_date(self): t = 1167616461.0 self.assertEqual(http_date(t), 'Mon, 01 Jan 2007 01:54:21 GMT') def test_parsing_rfc1123(self): parsed = parse_http_date('Sun, 06 Nov 1994 08:49:37 GMT') self.assertEqual(datetime.utcfromtimestamp(parsed), datetime(1994, 11, 6, 8, 49, 37)) def test_parsing_rfc850(self): parsed = parse_http_date('Sunday, 06-Nov-94 08:49:37 GMT') self.assertEqual(datetime.utcfromtimestamp(parsed), datetime(1994, 11, 6, 8, 49, 37)) def test_parsing_asctime(self): parsed = parse_http_date('Sun Nov 6 08:49:37 1994') self.assertEqual(datetime.utcfromtimestamp(parsed), datetime(1994, 11, 6, 8, 49, 37)) def test_parsing_year_less_than_70(self): parsed = parse_http_date('Sun Nov 6 08:49:37 0037') self.assertEqual(datetime.utcfromtimestamp(parsed), datetime(2037, 11, 6, 8, 49, 37)) class EscapeLeadingSlashesTests(unittest.TestCase): def test(self): tests = ( ('//example.com', '/%2Fexample.com'), ('//', '/%2F'), ) for url, expected in tests: with self.subTest(url=url): self.assertEqual(escape_leading_slashes(url), expected)
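

# A self-contained sketch of the base36 conversion checked in Base36IntTests:
# repeatedly divide by 36, emitting digits drawn from '0-9a-z'. This mirrors
# what int_to_base36() does; the function name is illustrative, not the
# Django implementation.
def _int_to_base36_sketch(i):
    digits = '0123456789abcdefghijklmnopqrstuvwxyz'
    if i < 0:
        raise ValueError('Negative base36 conversion input.')
    if i < 36:
        return digits[i]
    result = ''
    while i:
        i, remainder = divmod(i, 36)
        result = digits[remainder] + result
    return result


assert _int_to_base36_sketch(0) == '0'
assert _int_to_base36_sketch(42) == '16'
assert _int_to_base36_sketch(818469960) == 'django'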
fd71d22eb0dcd21464635faa427321fdd26fd08bf3e4dc961ad153d3cf4573c5
from django.conf import settings from django.db import connection, models from django.db.models.query_utils import Q from django.test import SimpleTestCase, TestCase, skipUnlessDBFeature from django.test.utils import isolate_apps from .models import Book, ChildModel1, ChildModel2 class SimpleIndexesTests(SimpleTestCase): def test_suffix(self): self.assertEqual(models.Index.suffix, 'idx') def test_repr(self): index = models.Index(fields=['title']) multi_col_index = models.Index(fields=['title', 'author']) partial_index = models.Index(fields=['title'], name='long_books_idx', condition=Q(pages__gt=400)) self.assertEqual(repr(index), "<Index: fields='title'>") self.assertEqual(repr(multi_col_index), "<Index: fields='title, author'>") self.assertEqual(repr(partial_index), "<Index: fields='title', condition=(AND: ('pages__gt', 400))>") def test_eq(self): index = models.Index(fields=['title']) same_index = models.Index(fields=['title']) another_index = models.Index(fields=['title', 'author']) index.model = Book same_index.model = Book another_index.model = Book self.assertEqual(index, same_index) self.assertNotEqual(index, another_index) def test_index_fields_type(self): with self.assertRaisesMessage(ValueError, 'Index.fields must be a list or tuple.'): models.Index(fields='title') def test_fields_tuple(self): self.assertEqual(models.Index(fields=('title',)).fields, ['title']) def test_raises_error_without_field(self): msg = 'At least one field is required to define an index.' with self.assertRaisesMessage(ValueError, msg): models.Index() def test_opclasses_requires_index_name(self): with self.assertRaisesMessage(ValueError, 'An index must be named to use opclasses.'): models.Index(opclasses=['jsonb_path_ops']) def test_opclasses_requires_list_or_tuple(self): with self.assertRaisesMessage(ValueError, 'Index.opclasses must be a list or tuple.'): models.Index(name='test_opclass', fields=['field'], opclasses='jsonb_path_ops') def test_opclasses_and_fields_same_length(self): msg = 'Index.fields and Index.opclasses must have the same number of elements.' with self.assertRaisesMessage(ValueError, msg): models.Index(name='test_opclass', fields=['field', 'other'], opclasses=['jsonb_path_ops']) def test_condition_requires_index_name(self): with self.assertRaisesMessage(ValueError, 'An index must be named to use condition.'): models.Index(condition=Q(pages__gt=400)) def test_condition_must_be_q(self): with self.assertRaisesMessage(ValueError, 'Index.condition must be a Q instance.'): models.Index(condition='invalid', name='long_book_idx') def test_name_auto_generation(self): index = models.Index(fields=['author']) index.set_name_with_model(Book) self.assertEqual(index.name, 'model_index_author_0f5565_idx') # '-' for DESC columns should be accounted for in the index name. index = models.Index(fields=['-author']) index.set_name_with_model(Book) self.assertEqual(index.name, 'model_index_author_708765_idx') # fields may be truncated in the name. db_column is used for naming. long_field_index = models.Index(fields=['pages']) long_field_index.set_name_with_model(Book) self.assertEqual(long_field_index.name, 'model_index_page_co_69235a_idx') # suffix can't be longer than 3 characters. long_field_index.suffix = 'suff' msg = 'Index too long for multiple database support. Is self.suffix longer than 3 characters?' 
with self.assertRaisesMessage(AssertionError, msg): long_field_index.set_name_with_model(Book) @isolate_apps('model_indexes') def test_name_auto_generation_with_quoted_db_table(self): class QuotedDbTable(models.Model): name = models.CharField(max_length=50) class Meta: db_table = '"t_quoted"' index = models.Index(fields=['name']) index.set_name_with_model(QuotedDbTable) self.assertEqual(index.name, 't_quoted_name_e4ed1b_idx') def test_deconstruction(self): index = models.Index(fields=['title'], db_tablespace='idx_tbls') index.set_name_with_model(Book) path, args, kwargs = index.deconstruct() self.assertEqual(path, 'django.db.models.Index') self.assertEqual(args, ()) self.assertEqual( kwargs, {'fields': ['title'], 'name': 'model_index_title_196f42_idx', 'db_tablespace': 'idx_tbls'} ) def test_deconstruct_with_condition(self): index = models.Index( name='big_book_index', fields=['title'], condition=Q(pages__gt=400), ) index.set_name_with_model(Book) path, args, kwargs = index.deconstruct() self.assertEqual(path, 'django.db.models.Index') self.assertEqual(args, ()) self.assertEqual( kwargs, { 'fields': ['title'], 'name': 'model_index_title_196f42_idx', 'condition': Q(pages__gt=400), } ) def test_clone(self): index = models.Index(fields=['title']) new_index = index.clone() self.assertIsNot(index, new_index) self.assertEqual(index.fields, new_index.fields) def test_name_set(self): index_names = [index.name for index in Book._meta.indexes] self.assertCountEqual( index_names, [ 'model_index_title_196f42_idx', 'model_index_isbn_34f975_idx', 'model_indexes_book_barcode_idx', ], ) def test_abstract_children(self): index_names = [index.name for index in ChildModel1._meta.indexes] self.assertEqual( index_names, ['model_index_name_440998_idx', 'model_indexes_childmodel1_idx'], ) index_names = [index.name for index in ChildModel2._meta.indexes] self.assertEqual( index_names, ['model_index_name_b6c374_idx', 'model_indexes_childmodel2_idx'], ) class IndexesTests(TestCase): @skipUnlessDBFeature('supports_tablespaces') def test_db_tablespace(self): editor = connection.schema_editor() # Index with db_tablespace attribute. for fields in [ # Field with db_tablespace specified on model. ['shortcut'], # Field without db_tablespace specified on model. ['author'], # Multi-column with db_tablespaces specified on model. ['shortcut', 'isbn'], # Multi-column without db_tablespace specified on model. ['title', 'author'], ]: with self.subTest(fields=fields): index = models.Index(fields=fields, db_tablespace='idx_tbls2') self.assertIn('"idx_tbls2"', str(index.create_sql(Book, editor)).lower()) # Indexes without db_tablespace attribute. for fields in [['author'], ['shortcut', 'isbn'], ['title', 'author']]: with self.subTest(fields=fields): index = models.Index(fields=fields) # The DEFAULT_INDEX_TABLESPACE setting can't be tested because # it's evaluated when the model class is defined. As a # consequence, @override_settings doesn't work. if settings.DEFAULT_INDEX_TABLESPACE: self.assertIn( '"%s"' % settings.DEFAULT_INDEX_TABLESPACE, str(index.create_sql(Book, editor)).lower() ) else: self.assertNotIn('TABLESPACE', str(index.create_sql(Book, editor))) # Field with db_tablespace specified on the model and an index without # db_tablespace. index = models.Index(fields=['shortcut']) self.assertIn('"idx_tbls"', str(index.create_sql(Book, editor)).lower())
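

# A sketch of the auto-generated names asserted above: set_name_with_model()
# builds '<table[:11]>_<first column[:7]>_<digest>_<suffix>', where the digest
# is a short hash of the table name, the ordered column names, and the
# suffix. Assuming the digest helper is MD5-based (as in
# django.db.backends.utils.names_digest), the rule looks roughly like this;
# both helpers below are illustrative, not the actual implementation.
import hashlib


def _names_digest_sketch(*args, length):
    md5 = hashlib.md5()
    for arg in args:
        md5.update(arg.encode())
    return md5.hexdigest()[:length]


def _index_name_sketch(table, columns, suffix='idx'):
    digest = _names_digest_sketch(table, *columns, suffix, length=6)
    return '%s_%s_%s_%s' % (table[:11], columns[0][:7], digest, suffix)


# For example, _index_name_sketch('model_indexes_book', ['author']) produces
# a name of the same shape as 'model_index_author_0f5565_idx'.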
ad1d4e4aa96b7e4b4d48fc8b6fdedf0cb597244613966a1b44c186e75b3216cb
import datetime import pickle import sys import unittest from operator import attrgetter from django.core.exceptions import EmptyResultSet, FieldError from django.db import DEFAULT_DB_ALIAS, connection from django.db.models import Count, F, Q from django.db.models.sql.constants import LOUTER from django.db.models.sql.where import NothingNode, WhereNode from django.test import SimpleTestCase, TestCase, skipUnlessDBFeature from django.test.utils import CaptureQueriesContext from .models import ( FK1, Annotation, Article, Author, BaseA, Book, CategoryItem, CategoryRelationship, Celebrity, Channel, Chapter, Child, ChildObjectA, Classroom, CommonMixedCaseForeignKeys, Company, Cover, CustomPk, CustomPkTag, DateTimePK, Detail, DumbCategory, Eaten, Employment, ExtraInfo, Fan, Food, Identifier, Individual, Item, Job, JobResponsibilities, Join, LeafA, LeafB, LoopX, LoopZ, ManagedModel, Member, MixedCaseDbColumnCategoryItem, MixedCaseFieldCategoryItem, ModelA, ModelB, ModelC, ModelD, MyObject, NamedCategory, Node, Note, NullableName, Number, ObjectA, ObjectB, ObjectC, OneToOneCategory, Order, OrderItem, Page, Paragraph, Person, Plaything, PointerA, Program, ProxyCategory, ProxyObjectA, ProxyObjectB, Ranking, Related, RelatedIndividual, RelatedObject, Report, ReportComment, ReservedName, Responsibility, School, SharedConnection, SimpleCategory, SingleObject, SpecialCategory, Staff, StaffUser, Student, Tag, Task, Teacher, Ticket21203Child, Ticket21203Parent, Ticket23605A, Ticket23605B, Ticket23605C, TvChef, Valid, X, ) class Queries1Tests(TestCase): @classmethod def setUpTestData(cls): generic = NamedCategory.objects.create(name="Generic") cls.t1 = Tag.objects.create(name='t1', category=generic) cls.t2 = Tag.objects.create(name='t2', parent=cls.t1, category=generic) cls.t3 = Tag.objects.create(name='t3', parent=cls.t1) t4 = Tag.objects.create(name='t4', parent=cls.t3) cls.t5 = Tag.objects.create(name='t5', parent=cls.t3) cls.n1 = Note.objects.create(note='n1', misc='foo', id=1) cls.n2 = Note.objects.create(note='n2', misc='bar', id=2) cls.n3 = Note.objects.create(note='n3', misc='foo', id=3) ann1 = Annotation.objects.create(name='a1', tag=cls.t1) ann1.notes.add(cls.n1) ann2 = Annotation.objects.create(name='a2', tag=t4) ann2.notes.add(cls.n2, cls.n3) # Create these out of order so that sorting by 'id' will be different to sorting # by 'info'. Helps detect some problems later. 
cls.e2 = ExtraInfo.objects.create(info='e2', note=cls.n2, value=41) e1 = ExtraInfo.objects.create(info='e1', note=cls.n1, value=42) cls.a1 = Author.objects.create(name='a1', num=1001, extra=e1) cls.a2 = Author.objects.create(name='a2', num=2002, extra=e1) a3 = Author.objects.create(name='a3', num=3003, extra=cls.e2) cls.a4 = Author.objects.create(name='a4', num=4004, extra=cls.e2) cls.time1 = datetime.datetime(2007, 12, 19, 22, 25, 0) cls.time2 = datetime.datetime(2007, 12, 19, 21, 0, 0) time3 = datetime.datetime(2007, 12, 20, 22, 25, 0) time4 = datetime.datetime(2007, 12, 20, 21, 0, 0) cls.i1 = Item.objects.create(name='one', created=cls.time1, modified=cls.time1, creator=cls.a1, note=cls.n3) cls.i1.tags.set([cls.t1, cls.t2]) cls.i2 = Item.objects.create(name='two', created=cls.time2, creator=cls.a2, note=cls.n2) cls.i2.tags.set([cls.t1, cls.t3]) cls.i3 = Item.objects.create(name='three', created=time3, creator=cls.a2, note=cls.n3) i4 = Item.objects.create(name='four', created=time4, creator=cls.a4, note=cls.n3) i4.tags.set([t4]) cls.r1 = Report.objects.create(name='r1', creator=cls.a1) Report.objects.create(name='r2', creator=a3) Report.objects.create(name='r3') # Ordering by 'rank' gives us rank2, rank1, rank3. Ordering by the Meta.ordering # will be rank3, rank2, rank1. cls.rank1 = Ranking.objects.create(rank=2, author=cls.a2) Cover.objects.create(title="first", item=i4) Cover.objects.create(title="second", item=cls.i2) def test_subquery_condition(self): qs1 = Tag.objects.filter(pk__lte=0) qs2 = Tag.objects.filter(parent__in=qs1) qs3 = Tag.objects.filter(parent__in=qs2) self.assertEqual(qs3.query.subq_aliases, {'T', 'U', 'V'}) self.assertIn('v0', str(qs3.query).lower()) qs4 = qs3.filter(parent__in=qs1) self.assertEqual(qs4.query.subq_aliases, {'T', 'U', 'V'}) # It is possible to reuse U for the second subquery, no need to use W. self.assertNotIn('w0', str(qs4.query).lower()) # So, 'U0."id"' is referenced in SELECT and WHERE twice. self.assertEqual(str(qs4.query).lower().count('u0.'), 4) def test_ticket1050(self): self.assertQuerysetEqual( Item.objects.filter(tags__isnull=True), ['<Item: three>'] ) self.assertQuerysetEqual( Item.objects.filter(tags__id__isnull=True), ['<Item: three>'] ) def test_ticket1801(self): self.assertQuerysetEqual( Author.objects.filter(item=self.i2), ['<Author: a2>'] ) self.assertQuerysetEqual( Author.objects.filter(item=self.i3), ['<Author: a2>'] ) self.assertQuerysetEqual( Author.objects.filter(item=self.i2) & Author.objects.filter(item=self.i3), ['<Author: a2>'] ) def test_ticket2306(self): # Checking that no join types are "left outer" joins. query = Item.objects.filter(tags=self.t2).query self.assertNotIn(LOUTER, [x.join_type for x in query.alias_map.values()]) self.assertQuerysetEqual( Item.objects.filter(Q(tags=self.t1)).order_by('name'), ['<Item: one>', '<Item: two>'] ) self.assertQuerysetEqual( Item.objects.filter(Q(tags=self.t1)).filter(Q(tags=self.t2)), ['<Item: one>'] ) self.assertQuerysetEqual( Item.objects.filter(Q(tags=self.t1)).filter(Q(creator__name='fred') | Q(tags=self.t2)), ['<Item: one>'] ) # Each filter call is processed "at once" against a single table, so this is # different from the previous example as it tries to find tags that are two # things at once (rather than two tags). 
        self.assertQuerysetEqual(
            Item.objects.filter(Q(tags=self.t1) & Q(tags=self.t2)),
            []
        )
        self.assertQuerysetEqual(
            Item.objects.filter(Q(tags=self.t1), Q(creator__name='fred') | Q(tags=self.t2)),
            []
        )
        qs = Author.objects.filter(ranking__rank=2, ranking__id=self.rank1.id)
        self.assertQuerysetEqual(list(qs), ['<Author: a2>'])
        self.assertEqual(qs.query.count_active_tables(), 2)
        qs = Author.objects.filter(ranking__rank=2).filter(ranking__id=self.rank1.id)
        self.assertEqual(qs.query.count_active_tables(), 3)

    def test_ticket4464(self):
        self.assertQuerysetEqual(
            Item.objects.filter(tags=self.t1).filter(tags=self.t2),
            ['<Item: one>']
        )
        self.assertQuerysetEqual(
            Item.objects.filter(tags__in=[self.t1, self.t2]).distinct().order_by('name'),
            ['<Item: one>', '<Item: two>']
        )
        self.assertQuerysetEqual(
            Item.objects.filter(tags__in=[self.t1, self.t2]).filter(tags=self.t3),
            ['<Item: two>']
        )

        # Make sure .distinct() works with slicing (this was broken in Oracle).
        self.assertQuerysetEqual(
            Item.objects.filter(tags__in=[self.t1, self.t2]).order_by('name')[:3],
            ['<Item: one>', '<Item: one>', '<Item: two>']
        )
        self.assertQuerysetEqual(
            Item.objects.filter(tags__in=[self.t1, self.t2]).distinct().order_by('name')[:3],
            ['<Item: one>', '<Item: two>']
        )

    def test_tickets_2080_3592(self):
        self.assertQuerysetEqual(
            Author.objects.filter(item__name='one') | Author.objects.filter(name='a3'),
            ['<Author: a1>', '<Author: a3>']
        )
        self.assertQuerysetEqual(
            Author.objects.filter(Q(item__name='one') | Q(name='a3')),
            ['<Author: a1>', '<Author: a3>']
        )
        self.assertQuerysetEqual(
            Author.objects.filter(Q(name='a3') | Q(item__name='one')),
            ['<Author: a1>', '<Author: a3>']
        )
        self.assertQuerysetEqual(
            Author.objects.filter(Q(item__name='three') | Q(report__name='r3')),
            ['<Author: a2>']
        )

    def test_ticket6074(self):
        # Merging two empty result sets shouldn't leave a queryset with no constraints
        # (which would match everything).
        self.assertQuerysetEqual(Author.objects.filter(Q(id__in=[])), [])
        self.assertQuerysetEqual(
            Author.objects.filter(Q(id__in=[]) | Q(id__in=[])),
            []
        )

    def test_tickets_1878_2939(self):
        self.assertEqual(Item.objects.values('creator').distinct().count(), 3)

        # Create something with a duplicate 'name' so that we can test multi-column
        # cases (which require some tricky SQL transformations under the covers).
xx = Item(name='four', created=self.time1, creator=self.a2, note=self.n1) xx.save() self.assertEqual( Item.objects.exclude(name='two').values('creator', 'name').distinct().count(), 4 ) self.assertEqual( ( Item.objects .exclude(name='two') .extra(select={'foo': '%s'}, select_params=(1,)) .values('creator', 'name', 'foo') .distinct() .count() ), 4 ) self.assertEqual( ( Item.objects .exclude(name='two') .extra(select={'foo': '%s'}, select_params=(1,)) .values('creator', 'name') .distinct() .count() ), 4 ) xx.delete() def test_ticket7323(self): self.assertEqual(Item.objects.values('creator', 'name').count(), 4) def test_ticket2253(self): q1 = Item.objects.order_by('name') q2 = Item.objects.filter(id=self.i1.id) self.assertQuerysetEqual( q1, ['<Item: four>', '<Item: one>', '<Item: three>', '<Item: two>'] ) self.assertQuerysetEqual(q2, ['<Item: one>']) self.assertQuerysetEqual( (q1 | q2).order_by('name'), ['<Item: four>', '<Item: one>', '<Item: three>', '<Item: two>'] ) self.assertQuerysetEqual((q1 & q2).order_by('name'), ['<Item: one>']) q1 = Item.objects.filter(tags=self.t1) q2 = Item.objects.filter(note=self.n3, tags=self.t2) q3 = Item.objects.filter(creator=self.a4) self.assertQuerysetEqual( ((q1 & q2) | q3).order_by('name'), ['<Item: four>', '<Item: one>'] ) def test_order_by_tables(self): q1 = Item.objects.order_by('name') q2 = Item.objects.filter(id=self.i1.id) list(q2) combined_query = (q1 & q2).order_by('name').query self.assertEqual(len([ t for t in combined_query.alias_map if combined_query.alias_refcount[t] ]), 1) def test_order_by_join_unref(self): """ This test is related to the above one, testing that there aren't old JOINs in the query. """ qs = Celebrity.objects.order_by('greatest_fan__fan_of') self.assertIn('OUTER JOIN', str(qs.query)) qs = qs.order_by('id') self.assertNotIn('OUTER JOIN', str(qs.query)) def test_get_clears_ordering(self): """ get() should clear ordering for optimization purposes. """ with CaptureQueriesContext(connection) as captured_queries: Author.objects.order_by('name').get(pk=self.a1.pk) self.assertNotIn('order by', captured_queries[0]['sql'].lower()) def test_tickets_4088_4306(self): self.assertQuerysetEqual( Report.objects.filter(creator=1001), ['<Report: r1>'] ) self.assertQuerysetEqual( Report.objects.filter(creator__num=1001), ['<Report: r1>'] ) self.assertQuerysetEqual(Report.objects.filter(creator__id=1001), []) self.assertQuerysetEqual( Report.objects.filter(creator__id=self.a1.id), ['<Report: r1>'] ) self.assertQuerysetEqual( Report.objects.filter(creator__name='a1'), ['<Report: r1>'] ) def test_ticket4510(self): self.assertQuerysetEqual( Author.objects.filter(report__name='r1'), ['<Author: a1>'] ) def test_ticket7378(self): self.assertQuerysetEqual(self.a1.report_set.all(), ['<Report: r1>']) def test_tickets_5324_6704(self): self.assertQuerysetEqual( Item.objects.filter(tags__name='t4'), ['<Item: four>'] ) self.assertQuerysetEqual( Item.objects.exclude(tags__name='t4').order_by('name').distinct(), ['<Item: one>', '<Item: three>', '<Item: two>'] ) self.assertQuerysetEqual( Item.objects.exclude(tags__name='t4').order_by('name').distinct().reverse(), ['<Item: two>', '<Item: three>', '<Item: one>'] ) self.assertQuerysetEqual( Author.objects.exclude(item__name='one').distinct().order_by('name'), ['<Author: a2>', '<Author: a3>', '<Author: a4>'] ) # Excluding across a m2m relation when there is more than one related # object associated was problematic. 
self.assertQuerysetEqual( Item.objects.exclude(tags__name='t1').order_by('name'), ['<Item: four>', '<Item: three>'] ) self.assertQuerysetEqual( Item.objects.exclude(tags__name='t1').exclude(tags__name='t4'), ['<Item: three>'] ) # Excluding from a relation that cannot be NULL should not use outer joins. query = Item.objects.exclude(creator__in=[self.a1, self.a2]).query self.assertNotIn(LOUTER, [x.join_type for x in query.alias_map.values()]) # Similarly, when one of the joins cannot possibly, ever, involve NULL # values (Author -> ExtraInfo, in the following), it should never be # promoted to a left outer join. So the following query should only # involve one "left outer" join (Author -> Item is 0-to-many). qs = Author.objects.filter(id=self.a1.id).filter(Q(extra__note=self.n1) | Q(item__note=self.n3)) self.assertEqual( len([ x for x in qs.query.alias_map.values() if x.join_type == LOUTER and qs.query.alias_refcount[x.table_alias] ]), 1 ) # The previous changes shouldn't affect nullable foreign key joins. self.assertQuerysetEqual( Tag.objects.filter(parent__isnull=True).order_by('name'), ['<Tag: t1>'] ) self.assertQuerysetEqual( Tag.objects.exclude(parent__isnull=True).order_by('name'), ['<Tag: t2>', '<Tag: t3>', '<Tag: t4>', '<Tag: t5>'] ) self.assertQuerysetEqual( Tag.objects.exclude(Q(parent__name='t1') | Q(parent__isnull=True)).order_by('name'), ['<Tag: t4>', '<Tag: t5>'] ) self.assertQuerysetEqual( Tag.objects.exclude(Q(parent__isnull=True) | Q(parent__name='t1')).order_by('name'), ['<Tag: t4>', '<Tag: t5>'] ) self.assertQuerysetEqual( Tag.objects.exclude(Q(parent__parent__isnull=True)).order_by('name'), ['<Tag: t4>', '<Tag: t5>'] ) self.assertQuerysetEqual( Tag.objects.filter(~Q(parent__parent__isnull=True)).order_by('name'), ['<Tag: t4>', '<Tag: t5>'] ) def test_ticket2091(self): t = Tag.objects.get(name='t4') self.assertQuerysetEqual( Item.objects.filter(tags__in=[t]), ['<Item: four>'] ) def test_avoid_infinite_loop_on_too_many_subqueries(self): x = Tag.objects.filter(pk=1) local_recursion_limit = sys.getrecursionlimit() // 16 msg = 'Maximum recursion depth exceeded: too many subqueries.' with self.assertRaisesMessage(RecursionError, msg): for i in range(local_recursion_limit + 2): x = Tag.objects.filter(pk__in=x) def test_reasonable_number_of_subq_aliases(self): x = Tag.objects.filter(pk=1) for _ in range(20): x = Tag.objects.filter(pk__in=x) self.assertEqual( x.query.subq_aliases, { 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'AA', 'AB', 'AC', 'AD', 'AE', 'AF', 'AG', 'AH', 'AI', 'AJ', 'AK', 'AL', 'AM', 'AN', } ) def test_heterogeneous_qs_combination(self): # Combining querysets built on different models should behave in a well-defined # fashion. We raise an error. 
with self.assertRaisesMessage(AssertionError, 'Cannot combine queries on two different base models.'): Author.objects.all() & Tag.objects.all() with self.assertRaisesMessage(AssertionError, 'Cannot combine queries on two different base models.'): Author.objects.all() | Tag.objects.all() def test_ticket3141(self): self.assertEqual(Author.objects.extra(select={'foo': '1'}).count(), 4) self.assertEqual( Author.objects.extra(select={'foo': '%s'}, select_params=(1,)).count(), 4 ) def test_ticket2400(self): self.assertQuerysetEqual( Author.objects.filter(item__isnull=True), ['<Author: a3>'] ) self.assertQuerysetEqual( Tag.objects.filter(item__isnull=True), ['<Tag: t5>'] ) def test_ticket2496(self): self.assertQuerysetEqual( Item.objects.extra(tables=['queries_author']).select_related().order_by('name')[:1], ['<Item: four>'] ) def test_error_raised_on_filter_with_dictionary(self): with self.assertRaisesMessage(FieldError, 'Cannot parse keyword query as dict'): Note.objects.filter({'note': 'n1', 'misc': 'foo'}) def test_tickets_2076_7256(self): # Ordering on related tables should be possible, even if the table is # not otherwise involved. self.assertQuerysetEqual( Item.objects.order_by('note__note', 'name'), ['<Item: two>', '<Item: four>', '<Item: one>', '<Item: three>'] ) # Ordering on a related field should use the remote model's default # ordering as a final step. self.assertQuerysetEqual( Author.objects.order_by('extra', '-name'), ['<Author: a2>', '<Author: a1>', '<Author: a4>', '<Author: a3>'] ) # Using remote model default ordering can span multiple models (in this # case, Cover is ordered by Item's default, which uses Note's default). self.assertQuerysetEqual( Cover.objects.all(), ['<Cover: first>', '<Cover: second>'] ) # If the remote model does not have a default ordering, we order by its 'id' # field. self.assertQuerysetEqual( Item.objects.order_by('creator', 'name'), ['<Item: one>', '<Item: three>', '<Item: two>', '<Item: four>'] ) # Ordering by a many-valued attribute (e.g. a many-to-many or reverse # ForeignKey) is legal, but the results might not make sense. That # isn't Django's problem. Garbage in, garbage out. self.assertQuerysetEqual( Item.objects.filter(tags__isnull=False).order_by('tags', 'id'), ['<Item: one>', '<Item: two>', '<Item: one>', '<Item: two>', '<Item: four>'] ) # If we replace the default ordering, Django adjusts the required # tables automatically. Item normally requires a join with Note to do # the default ordering, but that isn't needed here. qs = Item.objects.order_by('name') self.assertQuerysetEqual( qs, ['<Item: four>', '<Item: one>', '<Item: three>', '<Item: two>'] ) self.assertEqual(len(qs.query.alias_map), 1) def test_tickets_2874_3002(self): qs = Item.objects.select_related().order_by('note__note', 'name') self.assertQuerysetEqual( qs, ['<Item: two>', '<Item: four>', '<Item: one>', '<Item: three>'] ) # This is also a good select_related() test because there are multiple # Note entries in the SQL. The two Note items should be different. self.assertEqual(repr(qs[0].note), '<Note: n2>') self.assertEqual(repr(qs[0].creator.extra.note), '<Note: n1>') def test_ticket3037(self): self.assertQuerysetEqual( Item.objects.filter(Q(creator__name='a3', name='two') | Q(creator__name='a4', name='four')), ['<Item: four>'] ) def test_tickets_5321_7070(self): # Ordering columns must be included in the output columns. Note that # this means results that might otherwise be distinct are not (if there # are multiple values in the ordering cols), as in this example. 
This # isn't a bug; it's a warning to be careful with the selection of # ordering columns. self.assertSequenceEqual( Note.objects.values('misc').distinct().order_by('note', '-misc'), [{'misc': 'foo'}, {'misc': 'bar'}, {'misc': 'foo'}] ) def test_ticket4358(self): # If you don't pass any fields to values(), relation fields are # returned as "foo_id" keys, not "foo". For consistency, you should be # able to pass "foo_id" in the fields list and have it work, too. We # actually allow both "foo" and "foo_id". # The *_id version is returned by default. self.assertIn('note_id', ExtraInfo.objects.values()[0]) # You can also pass it in explicitly. self.assertSequenceEqual(ExtraInfo.objects.values('note_id'), [{'note_id': 1}, {'note_id': 2}]) # ...or use the field name. self.assertSequenceEqual(ExtraInfo.objects.values('note'), [{'note': 1}, {'note': 2}]) def test_ticket6154(self): # Multiple filter statements are joined using "AND" all the time. self.assertQuerysetEqual( Author.objects.filter(id=self.a1.id).filter(Q(extra__note=self.n1) | Q(item__note=self.n3)), ['<Author: a1>'] ) self.assertQuerysetEqual( Author.objects.filter(Q(extra__note=self.n1) | Q(item__note=self.n3)).filter(id=self.a1.id), ['<Author: a1>'] ) def test_ticket6981(self): self.assertQuerysetEqual( Tag.objects.select_related('parent').order_by('name'), ['<Tag: t1>', '<Tag: t2>', '<Tag: t3>', '<Tag: t4>', '<Tag: t5>'] ) def test_ticket9926(self): self.assertQuerysetEqual( Tag.objects.select_related("parent", "category").order_by('name'), ['<Tag: t1>', '<Tag: t2>', '<Tag: t3>', '<Tag: t4>', '<Tag: t5>'] ) self.assertQuerysetEqual( Tag.objects.select_related('parent', "parent__category").order_by('name'), ['<Tag: t1>', '<Tag: t2>', '<Tag: t3>', '<Tag: t4>', '<Tag: t5>'] ) def test_tickets_6180_6203(self): # Dates with limits and/or counts self.assertEqual(Item.objects.count(), 4) self.assertEqual(Item.objects.datetimes('created', 'month').count(), 1) self.assertEqual(Item.objects.datetimes('created', 'day').count(), 2) self.assertEqual(len(Item.objects.datetimes('created', 'day')), 2) self.assertEqual(Item.objects.datetimes('created', 'day')[0], datetime.datetime(2007, 12, 19, 0, 0)) def test_tickets_7087_12242(self): # Dates with extra select columns self.assertQuerysetEqual( Item.objects.datetimes('created', 'day').extra(select={'a': 1}), ['datetime.datetime(2007, 12, 19, 0, 0)', 'datetime.datetime(2007, 12, 20, 0, 0)'] ) self.assertQuerysetEqual( Item.objects.extra(select={'a': 1}).datetimes('created', 'day'), ['datetime.datetime(2007, 12, 19, 0, 0)', 'datetime.datetime(2007, 12, 20, 0, 0)'] ) name = "one" self.assertQuerysetEqual( Item.objects.datetimes('created', 'day').extra(where=['name=%s'], params=[name]), ['datetime.datetime(2007, 12, 19, 0, 0)'] ) self.assertQuerysetEqual( Item.objects.extra(where=['name=%s'], params=[name]).datetimes('created', 'day'), ['datetime.datetime(2007, 12, 19, 0, 0)'] ) def test_ticket7155(self): # Nullable dates self.assertQuerysetEqual( Item.objects.datetimes('modified', 'day'), ['datetime.datetime(2007, 12, 19, 0, 0)'] ) def test_ticket7098(self): # Make sure semi-deprecated ordering by related models syntax still # works. self.assertSequenceEqual( Item.objects.values('note__note').order_by('queries_note.note', 'id'), [{'note__note': 'n2'}, {'note__note': 'n3'}, {'note__note': 'n3'}, {'note__note': 'n3'}] ) def test_ticket7096(self): # Make sure exclude() with multiple conditions continues to work. 
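        # Added equivalence check (illustrative): exclude() with multiple
        # conditions negates the conjunction, i.e. it matches
        # NOT (a AND b) rather than (NOT a) AND (NOT b).
        self.assertEqual(
            list(Tag.objects.exclude(parent=self.t1, name='t3').order_by('name')),
            list(Tag.objects.filter(~(Q(parent=self.t1) & Q(name='t3'))).order_by('name')),
        )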
self.assertQuerysetEqual( Tag.objects.filter(parent=self.t1, name='t3').order_by('name'), ['<Tag: t3>'] ) self.assertQuerysetEqual( Tag.objects.exclude(parent=self.t1, name='t3').order_by('name'), ['<Tag: t1>', '<Tag: t2>', '<Tag: t4>', '<Tag: t5>'] ) self.assertQuerysetEqual( Item.objects.exclude(tags__name='t1', name='one').order_by('name').distinct(), ['<Item: four>', '<Item: three>', '<Item: two>'] ) self.assertQuerysetEqual( Item.objects.filter(name__in=['three', 'four']).exclude(tags__name='t1').order_by('name'), ['<Item: four>', '<Item: three>'] ) # More twisted cases, involving nested negations. self.assertQuerysetEqual( Item.objects.exclude(~Q(tags__name='t1', name='one')), ['<Item: one>'] ) self.assertQuerysetEqual( Item.objects.filter(~Q(tags__name='t1', name='one'), name='two'), ['<Item: two>'] ) self.assertQuerysetEqual( Item.objects.exclude(~Q(tags__name='t1', name='one'), name='two'), ['<Item: four>', '<Item: one>', '<Item: three>'] ) def test_tickets_7204_7506(self): # Make sure querysets with related fields can be pickled. If this # doesn't crash, it's a Good Thing. pickle.dumps(Item.objects.all()) def test_ticket7813(self): # We should also be able to pickle things that use select_related(). # The only tricky thing here is to ensure that we do the related # selections properly after unpickling. qs = Item.objects.select_related() query = qs.query.get_compiler(qs.db).as_sql()[0] query2 = pickle.loads(pickle.dumps(qs.query)) self.assertEqual( query2.get_compiler(qs.db).as_sql()[0], query ) def test_deferred_load_qs_pickling(self): # Check pickling of deferred-loading querysets qs = Item.objects.defer('name', 'creator') q2 = pickle.loads(pickle.dumps(qs)) self.assertEqual(list(qs), list(q2)) q3 = pickle.loads(pickle.dumps(qs, pickle.HIGHEST_PROTOCOL)) self.assertEqual(list(qs), list(q3)) def test_ticket7277(self): self.assertQuerysetEqual( self.n1.annotation_set.filter( Q(tag=self.t5) | Q(tag__children=self.t5) | Q(tag__children__children=self.t5) ), ['<Annotation: a1>'] ) def test_tickets_7448_7707(self): # Complex objects should be converted to strings before being used in # lookups. self.assertQuerysetEqual( Item.objects.filter(created__in=[self.time1, self.time2]), ['<Item: one>', '<Item: two>'] ) def test_ticket7235(self): # An EmptyQuerySet should not raise exceptions if it is filtered. Eaten.objects.create(meal='m') q = Eaten.objects.none() with self.assertNumQueries(0): self.assertQuerysetEqual(q.all(), []) self.assertQuerysetEqual(q.filter(meal='m'), []) self.assertQuerysetEqual(q.exclude(meal='m'), []) self.assertQuerysetEqual(q.complex_filter({'pk': 1}), []) self.assertQuerysetEqual(q.select_related('food'), []) self.assertQuerysetEqual(q.annotate(Count('food')), []) self.assertQuerysetEqual(q.order_by('meal', 'food'), []) self.assertQuerysetEqual(q.distinct(), []) self.assertQuerysetEqual( q.extra(select={'foo': "1"}), [] ) self.assertQuerysetEqual(q.reverse(), []) q.query.low_mark = 1 with self.assertRaisesMessage(AssertionError, 'Cannot change a query once a slice has been taken'): q.extra(select={'foo': "1"}) self.assertQuerysetEqual(q.defer('meal'), []) self.assertQuerysetEqual(q.only('meal'), []) def test_ticket7791(self): # There were "issues" when ordering and distinct-ing on fields related # via ForeignKeys. self.assertEqual( len(Note.objects.order_by('extrainfo__info').distinct()), 3 ) # Pickling of QuerySets using datetimes() should work. 
qs = Item.objects.datetimes('created', 'month') pickle.loads(pickle.dumps(qs)) def test_ticket9997(self): # If a ValuesList or Values queryset is passed as an inner query, we # make sure it's only requesting a single value and use that as the # thing to select. self.assertQuerysetEqual( Tag.objects.filter(name__in=Tag.objects.filter(parent=self.t1).values('name')), ['<Tag: t2>', '<Tag: t3>'] ) # Multi-valued values() and values_list() querysets should raise errors. with self.assertRaisesMessage(TypeError, 'Cannot use multi-field values as a filter value.'): Tag.objects.filter(name__in=Tag.objects.filter(parent=self.t1).values('name', 'id')) with self.assertRaisesMessage(TypeError, 'Cannot use multi-field values as a filter value.'): Tag.objects.filter(name__in=Tag.objects.filter(parent=self.t1).values_list('name', 'id')) def test_ticket9985(self): # qs.values_list(...).values(...) combinations should work. self.assertSequenceEqual( Note.objects.values_list("note", flat=True).values("id").order_by("id"), [{'id': 1}, {'id': 2}, {'id': 3}] ) self.assertQuerysetEqual( Annotation.objects.filter(notes__in=Note.objects.filter(note="n1").values_list('note').values('id')), ['<Annotation: a1>'] ) def test_ticket10205(self): # When bailing out early because of an empty "__in" filter, we need # to set things up correctly internally so that subqueries can continue properly. self.assertEqual(Tag.objects.filter(name__in=()).update(name="foo"), 0) def test_ticket10432(self): # Testing an empty "__in" filter with a generator as the value. def f(): return iter([]) n_obj = Note.objects.all()[0] def g(): yield n_obj.pk self.assertQuerysetEqual(Note.objects.filter(pk__in=f()), []) self.assertEqual(list(Note.objects.filter(pk__in=g())), [n_obj]) def test_ticket10742(self): # Queries used in an __in clause don't execute subqueries subq = Author.objects.filter(num__lt=3000) qs = Author.objects.filter(pk__in=subq) self.assertQuerysetEqual(qs, ['<Author: a1>', '<Author: a2>']) # The subquery result cache should not be populated self.assertIsNone(subq._result_cache) subq = Author.objects.filter(num__lt=3000) qs = Author.objects.exclude(pk__in=subq) self.assertQuerysetEqual(qs, ['<Author: a3>', '<Author: a4>']) # The subquery result cache should not be populated self.assertIsNone(subq._result_cache) subq = Author.objects.filter(num__lt=3000) self.assertQuerysetEqual( Author.objects.filter(Q(pk__in=subq) & Q(name='a1')), ['<Author: a1>'] ) # The subquery result cache should not be populated self.assertIsNone(subq._result_cache) def test_ticket7076(self): # Excluding shouldn't eliminate NULL entries. self.assertQuerysetEqual( Item.objects.exclude(modified=self.time1).order_by('name'), ['<Item: four>', '<Item: three>', '<Item: two>'] ) self.assertQuerysetEqual( Tag.objects.exclude(parent__name=self.t1.name), ['<Tag: t1>', '<Tag: t4>', '<Tag: t5>'] ) def test_ticket7181(self): # Ordering by related tables should accommodate nullable fields (this # test is a little tricky, since NULL ordering is database dependent. # Instead, we just count the number of results). self.assertEqual(len(Tag.objects.order_by('parent__name')), 5) # Empty querysets can be merged with others. 
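        # Added note: for queryset combination, none() behaves as the
        # identity element under | and as the absorbing element under &, as
        # the four assertions below check in both operand orders.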
self.assertQuerysetEqual( Note.objects.none() | Note.objects.all(), ['<Note: n1>', '<Note: n2>', '<Note: n3>'] ) self.assertQuerysetEqual( Note.objects.all() | Note.objects.none(), ['<Note: n1>', '<Note: n2>', '<Note: n3>'] ) self.assertQuerysetEqual(Note.objects.none() & Note.objects.all(), []) self.assertQuerysetEqual(Note.objects.all() & Note.objects.none(), []) def test_ticket9411(self): # Make sure bump_prefix() (an internal Query method) doesn't (re-)break. It's # sufficient that this query runs without error. qs = Tag.objects.values_list('id', flat=True).order_by('id') qs.query.bump_prefix(qs.query) first = qs[0] self.assertEqual(list(qs), list(range(first, first + 5))) def test_ticket8439(self): # Complex combinations of conjunctions, disjunctions and nullable # relations. self.assertQuerysetEqual( Author.objects.filter(Q(item__note__extrainfo=self.e2) | Q(report=self.r1, name='xyz')), ['<Author: a2>'] ) self.assertQuerysetEqual( Author.objects.filter(Q(report=self.r1, name='xyz') | Q(item__note__extrainfo=self.e2)), ['<Author: a2>'] ) self.assertQuerysetEqual( Annotation.objects.filter(Q(tag__parent=self.t1) | Q(notes__note='n1', name='a1')), ['<Annotation: a1>'] ) xx = ExtraInfo.objects.create(info='xx', note=self.n3) self.assertQuerysetEqual( Note.objects.filter(Q(extrainfo__author=self.a1) | Q(extrainfo=xx)), ['<Note: n1>', '<Note: n3>'] ) q = Note.objects.filter(Q(extrainfo__author=self.a1) | Q(extrainfo=xx)).query self.assertEqual( len([x for x in q.alias_map.values() if x.join_type == LOUTER and q.alias_refcount[x.table_alias]]), 1 ) def test_ticket17429(self): """ Meta.ordering=None works the same as Meta.ordering=[] """ original_ordering = Tag._meta.ordering Tag._meta.ordering = None try: self.assertQuerysetEqual( Tag.objects.all(), ['<Tag: t1>', '<Tag: t2>', '<Tag: t3>', '<Tag: t4>', '<Tag: t5>'], ordered=False ) finally: Tag._meta.ordering = original_ordering def test_exclude(self): self.assertQuerysetEqual( Item.objects.exclude(tags__name='t4'), [repr(i) for i in Item.objects.filter(~Q(tags__name='t4'))]) self.assertQuerysetEqual( Item.objects.exclude(Q(tags__name='t4') | Q(tags__name='t3')), [repr(i) for i in Item.objects.filter(~(Q(tags__name='t4') | Q(tags__name='t3')))]) self.assertQuerysetEqual( Item.objects.exclude(Q(tags__name='t4') | ~Q(tags__name='t3')), [repr(i) for i in Item.objects.filter(~(Q(tags__name='t4') | ~Q(tags__name='t3')))]) def test_nested_exclude(self): self.assertQuerysetEqual( Item.objects.exclude(~Q(tags__name='t4')), [repr(i) for i in Item.objects.filter(~~Q(tags__name='t4'))]) def test_double_exclude(self): self.assertQuerysetEqual( Item.objects.filter(Q(tags__name='t4')), [repr(i) for i in Item.objects.filter(~~Q(tags__name='t4'))]) self.assertQuerysetEqual( Item.objects.filter(Q(tags__name='t4')), [repr(i) for i in Item.objects.filter(~Q(~Q(tags__name='t4')))]) def test_exclude_in(self): self.assertQuerysetEqual( Item.objects.exclude(Q(tags__name__in=['t4', 't3'])), [repr(i) for i in Item.objects.filter(~Q(tags__name__in=['t4', 't3']))]) self.assertQuerysetEqual( Item.objects.filter(Q(tags__name__in=['t4', 't3'])), [repr(i) for i in Item.objects.filter(~~Q(tags__name__in=['t4', 't3']))]) def test_ticket_10790_1(self): # Querying direct fields with isnull should trim the left outer join. # It also should not create INNER JOIN. 
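        # Added sanity check (hedged; per the fixture, t1 is the only root
        # tag): an isnull test on a direct foreign key can be answered from
        # the local column alone, so no join of any kind should be needed.
        self.assertEqual(Tag.objects.filter(parent__isnull=True).count(), 1)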
q = Tag.objects.filter(parent__isnull=True) self.assertQuerysetEqual(q, ['<Tag: t1>']) self.assertNotIn('JOIN', str(q.query)) q = Tag.objects.filter(parent__isnull=False) self.assertQuerysetEqual( q, ['<Tag: t2>', '<Tag: t3>', '<Tag: t4>', '<Tag: t5>'], ) self.assertNotIn('JOIN', str(q.query)) q = Tag.objects.exclude(parent__isnull=True) self.assertQuerysetEqual( q, ['<Tag: t2>', '<Tag: t3>', '<Tag: t4>', '<Tag: t5>'], ) self.assertNotIn('JOIN', str(q.query)) q = Tag.objects.exclude(parent__isnull=False) self.assertQuerysetEqual(q, ['<Tag: t1>']) self.assertNotIn('JOIN', str(q.query)) q = Tag.objects.exclude(parent__parent__isnull=False) self.assertQuerysetEqual( q, ['<Tag: t1>', '<Tag: t2>', '<Tag: t3>'], ) self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 1) self.assertNotIn('INNER JOIN', str(q.query)) def test_ticket_10790_2(self): # Querying across several tables should strip only the last outer join, # while preserving the preceding inner joins. q = Tag.objects.filter(parent__parent__isnull=False) self.assertQuerysetEqual( q, ['<Tag: t4>', '<Tag: t5>'], ) self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 0) self.assertEqual(str(q.query).count('INNER JOIN'), 1) # Querying without isnull should not convert anything to left outer join. q = Tag.objects.filter(parent__parent=self.t1) self.assertQuerysetEqual( q, ['<Tag: t4>', '<Tag: t5>'], ) self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 0) self.assertEqual(str(q.query).count('INNER JOIN'), 1) def test_ticket_10790_3(self): # Querying via indirect fields should populate the left outer join q = NamedCategory.objects.filter(tag__isnull=True) self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 1) # join to dumbcategory ptr_id self.assertEqual(str(q.query).count('INNER JOIN'), 1) self.assertQuerysetEqual(q, []) # Querying across several tables should strip only the last join, while # preserving the preceding left outer joins. q = NamedCategory.objects.filter(tag__parent__isnull=True) self.assertEqual(str(q.query).count('INNER JOIN'), 1) self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 1) self.assertQuerysetEqual(q, ['<NamedCategory: Generic>']) def test_ticket_10790_4(self): # Querying across m2m field should not strip the m2m table from join. 
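        # Added note: item__tags spans Author -> Item -> Item_tags -> Tag.
        # The final Tag table can be trimmed for an isnull test, but the
        # intermediary m2m table must stay joined, which is why the
        # assertions below still count it among the LEFT OUTER JOINs.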
q = Author.objects.filter(item__tags__isnull=True) self.assertQuerysetEqual( q, ['<Author: a2>', '<Author: a3>'], ) self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 2) self.assertNotIn('INNER JOIN', str(q.query)) q = Author.objects.filter(item__tags__parent__isnull=True) self.assertQuerysetEqual( q, ['<Author: a1>', '<Author: a2>', '<Author: a2>', '<Author: a3>'], ) self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 3) self.assertNotIn('INNER JOIN', str(q.query)) def test_ticket_10790_5(self): # Querying with isnull=False across m2m field should not create outer joins q = Author.objects.filter(item__tags__isnull=False) self.assertQuerysetEqual( q, ['<Author: a1>', '<Author: a1>', '<Author: a2>', '<Author: a2>', '<Author: a4>'] ) self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 0) self.assertEqual(str(q.query).count('INNER JOIN'), 2) q = Author.objects.filter(item__tags__parent__isnull=False) self.assertQuerysetEqual( q, ['<Author: a1>', '<Author: a2>', '<Author: a4>'] ) self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 0) self.assertEqual(str(q.query).count('INNER JOIN'), 3) q = Author.objects.filter(item__tags__parent__parent__isnull=False) self.assertQuerysetEqual( q, ['<Author: a4>'] ) self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 0) self.assertEqual(str(q.query).count('INNER JOIN'), 4) def test_ticket_10790_6(self): # Querying with isnull=True across m2m field should not create inner joins # and strip last outer join q = Author.objects.filter(item__tags__parent__parent__isnull=True) self.assertQuerysetEqual( q, ['<Author: a1>', '<Author: a1>', '<Author: a2>', '<Author: a2>', '<Author: a2>', '<Author: a3>'] ) self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 4) self.assertEqual(str(q.query).count('INNER JOIN'), 0) q = Author.objects.filter(item__tags__parent__isnull=True) self.assertQuerysetEqual( q, ['<Author: a1>', '<Author: a2>', '<Author: a2>', '<Author: a3>'] ) self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 3) self.assertEqual(str(q.query).count('INNER JOIN'), 0) def test_ticket_10790_7(self): # Reverse querying with isnull should not strip the join q = Author.objects.filter(item__isnull=True) self.assertQuerysetEqual( q, ['<Author: a3>'] ) self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 1) self.assertEqual(str(q.query).count('INNER JOIN'), 0) q = Author.objects.filter(item__isnull=False) self.assertQuerysetEqual( q, ['<Author: a1>', '<Author: a2>', '<Author: a2>', '<Author: a4>'] ) self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 0) self.assertEqual(str(q.query).count('INNER JOIN'), 1) def test_ticket_10790_8(self): # Querying with combined q-objects should also strip the left outer join q = Tag.objects.filter(Q(parent__isnull=True) | Q(parent=self.t1)) self.assertQuerysetEqual( q, ['<Tag: t1>', '<Tag: t2>', '<Tag: t3>'] ) self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 0) self.assertEqual(str(q.query).count('INNER JOIN'), 0) def test_ticket_10790_combine(self): # Combining queries should not re-populate the left outer join q1 = Tag.objects.filter(parent__isnull=True) q2 = Tag.objects.filter(parent__isnull=False) q3 = q1 | q2 self.assertQuerysetEqual( q3, ['<Tag: t1>', '<Tag: t2>', '<Tag: t3>', '<Tag: t4>', '<Tag: t5>'], ) self.assertEqual(str(q3.query).count('LEFT OUTER JOIN'), 0) self.assertEqual(str(q3.query).count('INNER JOIN'), 0) q3 = q1 & q2 self.assertQuerysetEqual(q3, []) self.assertEqual(str(q3.query).count('LEFT OUTER JOIN'), 0) self.assertEqual(str(q3.query).count('INNER JOIN'), 0) q2 = 
        Tag.objects.filter(parent=self.t1)
        q3 = q1 | q2
        self.assertQuerysetEqual(
            q3,
            ['<Tag: t1>', '<Tag: t2>', '<Tag: t3>']
        )
        self.assertEqual(str(q3.query).count('LEFT OUTER JOIN'), 0)
        self.assertEqual(str(q3.query).count('INNER JOIN'), 0)
        q3 = q2 | q1
        self.assertQuerysetEqual(
            q3,
            ['<Tag: t1>', '<Tag: t2>', '<Tag: t3>']
        )
        self.assertEqual(str(q3.query).count('LEFT OUTER JOIN'), 0)
        self.assertEqual(str(q3.query).count('INNER JOIN'), 0)
        q1 = Tag.objects.filter(parent__isnull=True)
        q2 = Tag.objects.filter(parent__parent__isnull=True)
        q3 = q1 | q2
        self.assertQuerysetEqual(
            q3,
            ['<Tag: t1>', '<Tag: t2>', '<Tag: t3>']
        )
        self.assertEqual(str(q3.query).count('LEFT OUTER JOIN'), 1)
        self.assertEqual(str(q3.query).count('INNER JOIN'), 0)
        q3 = q2 | q1
        self.assertQuerysetEqual(
            q3,
            ['<Tag: t1>', '<Tag: t2>', '<Tag: t3>']
        )
        self.assertEqual(str(q3.query).count('LEFT OUTER JOIN'), 1)
        self.assertEqual(str(q3.query).count('INNER JOIN'), 0)

    def test_ticket19672(self):
        self.assertQuerysetEqual(
            Report.objects.filter(Q(creator__isnull=False) & ~Q(creator__extra__value=41)),
            ['<Report: r1>']
        )

    def test_ticket_20250(self):
        # A negated Q along with an annotated queryset failed in Django 1.4.
        qs = Author.objects.annotate(Count('item'))
        qs = qs.filter(~Q(extra__value=0)).order_by('name')
        self.assertIn('SELECT', str(qs.query))
        self.assertQuerysetEqual(
            qs,
            ['<Author: a1>', '<Author: a2>', '<Author: a3>', '<Author: a4>']
        )

    def test_lookup_constraint_fielderror(self):
        msg = (
            "Cannot resolve keyword 'unknown_field' into field. Choices are: "
            "annotation, category, category_id, children, id, item, "
            "managedmodel, name, note, parent, parent_id"
        )
        with self.assertRaisesMessage(FieldError, msg):
            Tag.objects.filter(unknown_field__name='generic')

    def test_common_mixed_case_foreign_keys(self):
        """
        Valid query should be generated when fields fetched from joined
        tables include FKs whose names only differ by case.
        """
        c1 = SimpleCategory.objects.create(name='c1')
        c2 = SimpleCategory.objects.create(name='c2')
        c3 = SimpleCategory.objects.create(name='c3')
        category = CategoryItem.objects.create(category=c1)
        mixed_case_field_category = MixedCaseFieldCategoryItem.objects.create(CaTeGoRy=c2)
        mixed_case_db_column_category = MixedCaseDbColumnCategoryItem.objects.create(category=c3)
        CommonMixedCaseForeignKeys.objects.create(
            category=category,
            mixed_case_field_category=mixed_case_field_category,
            mixed_case_db_column_category=mixed_case_db_column_category,
        )
        qs = CommonMixedCaseForeignKeys.objects.values(
            'category',
            'mixed_case_field_category',
            'mixed_case_db_column_category',
            'category__category',
            'mixed_case_field_category__CaTeGoRy',
            'mixed_case_db_column_category__category',
        )
        self.assertTrue(qs.first())

    def test_excluded_intermediary_m2m_table_joined(self):
        self.assertSequenceEqual(
            Note.objects.filter(~Q(tag__annotation__name=F('note'))),
            [self.n1, self.n2, self.n3],
        )
        self.assertSequenceEqual(
            Note.objects.filter(tag__annotation__name='a1').filter(~Q(tag__annotation__name=F('note'))),
            [],
        )


class Queries2Tests(TestCase):
    @classmethod
    def setUpTestData(cls):
        Number.objects.create(num=4)
        Number.objects.create(num=8)
        Number.objects.create(num=12)

    def test_ticket4289(self):
        # A slight variation on restricting the filtering choices by the
        # lookup constraints.
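        # Added sanity check (from setUpTestData above): the fixture holds
        # exactly the values 4, 8 and 12, so strict bounds such as
        # num__gt=8, num__lt=12 match nothing.
        self.assertEqual(sorted(Number.objects.values_list('num', flat=True)), [4, 8, 12])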
self.assertQuerysetEqual(Number.objects.filter(num__lt=4), []) self.assertQuerysetEqual(Number.objects.filter(num__gt=8, num__lt=12), []) self.assertQuerysetEqual( Number.objects.filter(num__gt=8, num__lt=13), ['<Number: 12>'] ) self.assertQuerysetEqual( Number.objects.filter(Q(num__lt=4) | Q(num__gt=8, num__lt=12)), [] ) self.assertQuerysetEqual( Number.objects.filter(Q(num__gt=8, num__lt=12) | Q(num__lt=4)), [] ) self.assertQuerysetEqual( Number.objects.filter(Q(num__gt=8) & Q(num__lt=12) | Q(num__lt=4)), [] ) self.assertQuerysetEqual( Number.objects.filter(Q(num__gt=7) & Q(num__lt=12) | Q(num__lt=4)), ['<Number: 8>'] ) def test_ticket12239(self): # Custom lookups are registered to round float values correctly on gte # and lt IntegerField queries. self.assertQuerysetEqual( Number.objects.filter(num__gt=11.9), ['<Number: 12>'] ) self.assertQuerysetEqual(Number.objects.filter(num__gt=12), []) self.assertQuerysetEqual(Number.objects.filter(num__gt=12.0), []) self.assertQuerysetEqual(Number.objects.filter(num__gt=12.1), []) self.assertQuerysetEqual( Number.objects.filter(num__lt=12), ['<Number: 4>', '<Number: 8>'], ordered=False ) self.assertQuerysetEqual( Number.objects.filter(num__lt=12.0), ['<Number: 4>', '<Number: 8>'], ordered=False ) self.assertQuerysetEqual( Number.objects.filter(num__lt=12.1), ['<Number: 4>', '<Number: 8>', '<Number: 12>'], ordered=False ) self.assertQuerysetEqual( Number.objects.filter(num__gte=11.9), ['<Number: 12>'] ) self.assertQuerysetEqual( Number.objects.filter(num__gte=12), ['<Number: 12>'] ) self.assertQuerysetEqual( Number.objects.filter(num__gte=12.0), ['<Number: 12>'] ) self.assertQuerysetEqual(Number.objects.filter(num__gte=12.1), []) self.assertQuerysetEqual(Number.objects.filter(num__gte=12.9), []) self.assertQuerysetEqual( Number.objects.filter(num__lte=11.9), ['<Number: 4>', '<Number: 8>'], ordered=False ) self.assertQuerysetEqual( Number.objects.filter(num__lte=12), ['<Number: 4>', '<Number: 8>', '<Number: 12>'], ordered=False ) self.assertQuerysetEqual( Number.objects.filter(num__lte=12.0), ['<Number: 4>', '<Number: 8>', '<Number: 12>'], ordered=False ) self.assertQuerysetEqual( Number.objects.filter(num__lte=12.1), ['<Number: 4>', '<Number: 8>', '<Number: 12>'], ordered=False ) self.assertQuerysetEqual( Number.objects.filter(num__lte=12.9), ['<Number: 4>', '<Number: 8>', '<Number: 12>'], ordered=False ) def test_ticket7759(self): # Count should work with a partially read result set. count = Number.objects.count() qs = Number.objects.all() def run(): for obj in qs: return qs.count() == count self.assertTrue(run()) class Queries3Tests(TestCase): def test_ticket7107(self): # This shouldn't create an infinite loop. self.assertQuerysetEqual(Valid.objects.all(), []) def test_ticket8683(self): # An error should be raised when QuerySet.datetimes() is passed the # wrong type of field. 
with self.assertRaisesMessage(AssertionError, "'name' isn't a DateField, TimeField, or DateTimeField."): Item.objects.datetimes('name', 'month') def test_ticket22023(self): with self.assertRaisesMessage(TypeError, "Cannot call only() after .values() or .values_list()"): Valid.objects.values().only() with self.assertRaisesMessage(TypeError, "Cannot call defer() after .values() or .values_list()"): Valid.objects.values().defer() class Queries4Tests(TestCase): @classmethod def setUpTestData(cls): generic = NamedCategory.objects.create(name="Generic") cls.t1 = Tag.objects.create(name='t1', category=generic) n1 = Note.objects.create(note='n1', misc='foo') n2 = Note.objects.create(note='n2', misc='bar') e1 = ExtraInfo.objects.create(info='e1', note=n1) e2 = ExtraInfo.objects.create(info='e2', note=n2) cls.a1 = Author.objects.create(name='a1', num=1001, extra=e1) cls.a3 = Author.objects.create(name='a3', num=3003, extra=e2) cls.r1 = Report.objects.create(name='r1', creator=cls.a1) cls.r2 = Report.objects.create(name='r2', creator=cls.a3) cls.r3 = Report.objects.create(name='r3') Item.objects.create(name='i1', created=datetime.datetime.now(), note=n1, creator=cls.a1) Item.objects.create(name='i2', created=datetime.datetime.now(), note=n1, creator=cls.a3) def test_ticket24525(self): tag = Tag.objects.create() anth100 = tag.note_set.create(note='ANTH', misc='100') math101 = tag.note_set.create(note='MATH', misc='101') s1 = tag.annotation_set.create(name='1') s2 = tag.annotation_set.create(name='2') s1.notes.set([math101, anth100]) s2.notes.set([math101]) result = math101.annotation_set.all() & tag.annotation_set.exclude(notes__in=[anth100]) self.assertEqual(list(result), [s2]) def test_ticket11811(self): unsaved_category = NamedCategory(name="Other") msg = 'Unsaved model instance <NamedCategory: Other> cannot be used in an ORM query.' with self.assertRaisesMessage(ValueError, msg): Tag.objects.filter(pk=self.t1.pk).update(category=unsaved_category) def test_ticket14876(self): # Note: when combining the query we need to have information available # about the join type of the trimmed "creator__isnull" join. If we # don't have that information, then the join is created as INNER JOIN # and results will be incorrect. 
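        # Added note: each pair below builds the same condition once as a
        # single combined filter and once by ORing two querysets; asserting
        # identical SQL shows that the trimmed join's type information
        # survives the combination.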
q1 = Report.objects.filter(Q(creator__isnull=True) | Q(creator__extra__info='e1')) q2 = Report.objects.filter(Q(creator__isnull=True)) | Report.objects.filter(Q(creator__extra__info='e1')) self.assertQuerysetEqual(q1, ["<Report: r1>", "<Report: r3>"], ordered=False) self.assertEqual(str(q1.query), str(q2.query)) q1 = Report.objects.filter(Q(creator__extra__info='e1') | Q(creator__isnull=True)) q2 = Report.objects.filter(Q(creator__extra__info='e1')) | Report.objects.filter(Q(creator__isnull=True)) self.assertQuerysetEqual(q1, ["<Report: r1>", "<Report: r3>"], ordered=False) self.assertEqual(str(q1.query), str(q2.query)) q1 = Item.objects.filter(Q(creator=self.a1) | Q(creator__report__name='r1')).order_by() q2 = ( Item.objects .filter(Q(creator=self.a1)).order_by() | Item.objects.filter(Q(creator__report__name='r1')) .order_by() ) self.assertQuerysetEqual(q1, ["<Item: i1>"]) self.assertEqual(str(q1.query), str(q2.query)) q1 = Item.objects.filter(Q(creator__report__name='e1') | Q(creator=self.a1)).order_by() q2 = ( Item.objects.filter(Q(creator__report__name='e1')).order_by() | Item.objects.filter(Q(creator=self.a1)).order_by() ) self.assertQuerysetEqual(q1, ["<Item: i1>"]) self.assertEqual(str(q1.query), str(q2.query)) def test_combine_join_reuse(self): # Joins having identical connections are correctly recreated in the # rhs query, in case the query is ORed together (#18748). Report.objects.create(name='r4', creator=self.a1) q1 = Author.objects.filter(report__name='r5') q2 = Author.objects.filter(report__name='r4').filter(report__name='r1') combined = q1 | q2 self.assertEqual(str(combined.query).count('JOIN'), 2) self.assertEqual(len(combined), 1) self.assertEqual(combined[0].name, 'a1') def test_join_reuse_order(self): # Join aliases are reused in order. This shouldn't raise AssertionError # because change_map contains a circular reference (#26522). s1 = School.objects.create() s2 = School.objects.create() s3 = School.objects.create() t1 = Teacher.objects.create() otherteachers = Teacher.objects.exclude(pk=t1.pk).exclude(friends=t1) qs1 = otherteachers.filter(schools=s1).filter(schools=s2) qs2 = otherteachers.filter(schools=s1).filter(schools=s3) self.assertQuerysetEqual(qs1 | qs2, []) def test_ticket7095(self): # Updates that are filtered on the model being updated are somewhat # tricky in MySQL. ManagedModel.objects.create(data='mm1', tag=self.t1, public=True) self.assertEqual(ManagedModel.objects.update(data='mm'), 1) # A values() or values_list() query across joined models must use outer # joins appropriately. # Note: In Oracle, we expect a null CharField to return '' instead of # None. if connection.features.interprets_empty_strings_as_nulls: expected_null_charfield_repr = '' else: expected_null_charfield_repr = None self.assertSequenceEqual( Report.objects.values_list("creator__extra__info", flat=True).order_by("name"), ['e1', 'e2', expected_null_charfield_repr], ) # Similarly for select_related(), joins beyond an initial nullable join # must use outer joins so that all results are included. self.assertQuerysetEqual( Report.objects.select_related("creator", "creator__extra").order_by("name"), ['<Report: r1>', '<Report: r2>', '<Report: r3>'] ) # When there are multiple paths to a table from another table, we have # to be careful not to accidentally reuse an inappropriate join when # using select_related(). We used to return the parent's Detail record # here by mistake. 
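        # Added note: the Child row created below links parent=m1 to
        # person=m2, so the correct Detail for obj.person is d2; the old
        # join-reuse bug surfaced the parent's d1 instead.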
d1 = Detail.objects.create(data="d1") d2 = Detail.objects.create(data="d2") m1 = Member.objects.create(name="m1", details=d1) m2 = Member.objects.create(name="m2", details=d2) Child.objects.create(person=m2, parent=m1) obj = m1.children.select_related("person__details")[0] self.assertEqual(obj.person.details.data, 'd2') def test_order_by_resetting(self): # Calling order_by() with no parameters removes any existing ordering on the # model. But it should still be possible to add new ordering after that. qs = Author.objects.order_by().order_by('name') self.assertIn('ORDER BY', qs.query.get_compiler(qs.db).as_sql()[0]) def test_order_by_reverse_fk(self): # It is possible to order by reverse of foreign key, although that can lead # to duplicate results. c1 = SimpleCategory.objects.create(name="category1") c2 = SimpleCategory.objects.create(name="category2") CategoryItem.objects.create(category=c1) CategoryItem.objects.create(category=c2) CategoryItem.objects.create(category=c1) self.assertSequenceEqual(SimpleCategory.objects.order_by('categoryitem', 'pk'), [c1, c2, c1]) def test_filter_reverse_non_integer_pk(self): date_obj = DateTimePK.objects.create() extra_obj = ExtraInfo.objects.create(info='extra', date=date_obj) self.assertEqual( DateTimePK.objects.filter(extrainfo=extra_obj).get(), date_obj, ) def test_ticket10181(self): # Avoid raising an EmptyResultSet if an inner query is probably # empty (and hence, not executed). self.assertQuerysetEqual( Tag.objects.filter(id__in=Tag.objects.filter(id__in=[])), [] ) def test_ticket15316_filter_false(self): c1 = SimpleCategory.objects.create(name="category1") c2 = SpecialCategory.objects.create(name="named category1", special_name="special1") c3 = SpecialCategory.objects.create(name="named category2", special_name="special2") CategoryItem.objects.create(category=c1) ci2 = CategoryItem.objects.create(category=c2) ci3 = CategoryItem.objects.create(category=c3) qs = CategoryItem.objects.filter(category__specialcategory__isnull=False) self.assertEqual(qs.count(), 2) self.assertSequenceEqual(qs, [ci2, ci3]) def test_ticket15316_exclude_false(self): c1 = SimpleCategory.objects.create(name="category1") c2 = SpecialCategory.objects.create(name="named category1", special_name="special1") c3 = SpecialCategory.objects.create(name="named category2", special_name="special2") ci1 = CategoryItem.objects.create(category=c1) CategoryItem.objects.create(category=c2) CategoryItem.objects.create(category=c3) qs = CategoryItem.objects.exclude(category__specialcategory__isnull=False) self.assertEqual(qs.count(), 1) self.assertSequenceEqual(qs, [ci1]) def test_ticket15316_filter_true(self): c1 = SimpleCategory.objects.create(name="category1") c2 = SpecialCategory.objects.create(name="named category1", special_name="special1") c3 = SpecialCategory.objects.create(name="named category2", special_name="special2") ci1 = CategoryItem.objects.create(category=c1) CategoryItem.objects.create(category=c2) CategoryItem.objects.create(category=c3) qs = CategoryItem.objects.filter(category__specialcategory__isnull=True) self.assertEqual(qs.count(), 1) self.assertSequenceEqual(qs, [ci1]) def test_ticket15316_exclude_true(self): c1 = SimpleCategory.objects.create(name="category1") c2 = SpecialCategory.objects.create(name="named category1", special_name="special1") c3 = SpecialCategory.objects.create(name="named category2", special_name="special2") CategoryItem.objects.create(category=c1) ci2 = CategoryItem.objects.create(category=c2) ci3 = CategoryItem.objects.create(category=c3) qs = 
CategoryItem.objects.exclude(category__specialcategory__isnull=True) self.assertEqual(qs.count(), 2) self.assertSequenceEqual(qs, [ci2, ci3]) def test_ticket15316_one2one_filter_false(self): c = SimpleCategory.objects.create(name="cat") c0 = SimpleCategory.objects.create(name="cat0") c1 = SimpleCategory.objects.create(name="category1") OneToOneCategory.objects.create(category=c1, new_name="new1") OneToOneCategory.objects.create(category=c0, new_name="new2") CategoryItem.objects.create(category=c) ci2 = CategoryItem.objects.create(category=c0) ci3 = CategoryItem.objects.create(category=c1) qs = CategoryItem.objects.filter(category__onetoonecategory__isnull=False).order_by('pk') self.assertEqual(qs.count(), 2) self.assertSequenceEqual(qs, [ci2, ci3]) def test_ticket15316_one2one_exclude_false(self): c = SimpleCategory.objects.create(name="cat") c0 = SimpleCategory.objects.create(name="cat0") c1 = SimpleCategory.objects.create(name="category1") OneToOneCategory.objects.create(category=c1, new_name="new1") OneToOneCategory.objects.create(category=c0, new_name="new2") ci1 = CategoryItem.objects.create(category=c) CategoryItem.objects.create(category=c0) CategoryItem.objects.create(category=c1) qs = CategoryItem.objects.exclude(category__onetoonecategory__isnull=False) self.assertEqual(qs.count(), 1) self.assertSequenceEqual(qs, [ci1]) def test_ticket15316_one2one_filter_true(self): c = SimpleCategory.objects.create(name="cat") c0 = SimpleCategory.objects.create(name="cat0") c1 = SimpleCategory.objects.create(name="category1") OneToOneCategory.objects.create(category=c1, new_name="new1") OneToOneCategory.objects.create(category=c0, new_name="new2") ci1 = CategoryItem.objects.create(category=c) CategoryItem.objects.create(category=c0) CategoryItem.objects.create(category=c1) qs = CategoryItem.objects.filter(category__onetoonecategory__isnull=True) self.assertEqual(qs.count(), 1) self.assertSequenceEqual(qs, [ci1]) def test_ticket15316_one2one_exclude_true(self): c = SimpleCategory.objects.create(name="cat") c0 = SimpleCategory.objects.create(name="cat0") c1 = SimpleCategory.objects.create(name="category1") OneToOneCategory.objects.create(category=c1, new_name="new1") OneToOneCategory.objects.create(category=c0, new_name="new2") CategoryItem.objects.create(category=c) ci2 = CategoryItem.objects.create(category=c0) ci3 = CategoryItem.objects.create(category=c1) qs = CategoryItem.objects.exclude(category__onetoonecategory__isnull=True).order_by('pk') self.assertEqual(qs.count(), 2) self.assertSequenceEqual(qs, [ci2, ci3]) class Queries5Tests(TestCase): @classmethod def setUpTestData(cls): # Ordering by 'rank' gives us rank2, rank1, rank3. Ordering by the # Meta.ordering will be rank3, rank2, rank1. n1 = Note.objects.create(note='n1', misc='foo', id=1) n2 = Note.objects.create(note='n2', misc='bar', id=2) e1 = ExtraInfo.objects.create(info='e1', note=n1) e2 = ExtraInfo.objects.create(info='e2', note=n2) a1 = Author.objects.create(name='a1', num=1001, extra=e1) a2 = Author.objects.create(name='a2', num=2002, extra=e1) a3 = Author.objects.create(name='a3', num=3003, extra=e2) cls.rank1 = Ranking.objects.create(rank=2, author=a2) Ranking.objects.create(rank=1, author=a3) Ranking.objects.create(rank=3, author=a1) def test_ordering(self): # Cross model ordering is possible in Meta, too. 
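        # Added note (hedged): Ranking's Meta ordering spans the related
        # Author model (see the fixture comment in setUpTestData), so the
        # default listing below follows the authors (a1, a2, a3) while
        # order_by('rank') restores plain numeric order.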
self.assertQuerysetEqual( Ranking.objects.all(), ['<Ranking: 3: a1>', '<Ranking: 2: a2>', '<Ranking: 1: a3>'] ) self.assertQuerysetEqual( Ranking.objects.all().order_by('rank'), ['<Ranking: 1: a3>', '<Ranking: 2: a2>', '<Ranking: 3: a1>'] ) # Ordering of extra() pieces is possible, too and you can mix extra # fields and model fields in the ordering. self.assertQuerysetEqual( Ranking.objects.extra(tables=['django_site'], order_by=['-django_site.id', 'rank']), ['<Ranking: 1: a3>', '<Ranking: 2: a2>', '<Ranking: 3: a1>'] ) sql = 'case when %s > 2 then 1 else 0 end' % connection.ops.quote_name('rank') qs = Ranking.objects.extra(select={'good': sql}) self.assertEqual( [o.good for o in qs.extra(order_by=('-good',))], [True, False, False] ) self.assertQuerysetEqual( qs.extra(order_by=('-good', 'id')), ['<Ranking: 3: a1>', '<Ranking: 2: a2>', '<Ranking: 1: a3>'] ) # Despite having some extra aliases in the query, we can still omit # them in a values() query. dicts = qs.values('id', 'rank').order_by('id') self.assertEqual( [d['rank'] for d in dicts], [2, 1, 3] ) def test_ticket7256(self): # An empty values() call includes all aliases, including those from an # extra() sql = 'case when %s > 2 then 1 else 0 end' % connection.ops.quote_name('rank') qs = Ranking.objects.extra(select={'good': sql}) dicts = qs.values().order_by('id') for d in dicts: del d['id'] del d['author_id'] self.assertEqual( [sorted(d.items()) for d in dicts], [[('good', 0), ('rank', 2)], [('good', 0), ('rank', 1)], [('good', 1), ('rank', 3)]] ) def test_ticket7045(self): # Extra tables used to crash SQL construction on the second use. qs = Ranking.objects.extra(tables=['django_site']) qs.query.get_compiler(qs.db).as_sql() # test passes if this doesn't raise an exception. qs.query.get_compiler(qs.db).as_sql() def test_ticket9848(self): # Make sure that updates which only filter on sub-tables don't # inadvertently update the wrong records (bug #9848). author_start = Author.objects.get(name='a1') ranking_start = Ranking.objects.get(author__name='a1') # Make sure that the IDs from different tables don't happen to match. self.assertQuerysetEqual( Ranking.objects.filter(author__name='a1'), ['<Ranking: 3: a1>'] ) self.assertEqual( Ranking.objects.filter(author__name='a1').update(rank=4636), 1 ) r = Ranking.objects.get(author__name='a1') self.assertEqual(r.id, ranking_start.id) self.assertEqual(r.author.id, author_start.id) self.assertEqual(r.rank, 4636) r.rank = 3 r.save() self.assertQuerysetEqual( Ranking.objects.all(), ['<Ranking: 3: a1>', '<Ranking: 2: a2>', '<Ranking: 1: a3>'] ) def test_ticket5261(self): # Test different empty excludes. self.assertQuerysetEqual( Note.objects.exclude(Q()), ['<Note: n1>', '<Note: n2>'] ) self.assertQuerysetEqual( Note.objects.filter(~Q()), ['<Note: n1>', '<Note: n2>'] ) self.assertQuerysetEqual( Note.objects.filter(~Q() | ~Q()), ['<Note: n1>', '<Note: n2>'] ) self.assertQuerysetEqual( Note.objects.exclude(~Q() & ~Q()), ['<Note: n1>', '<Note: n2>'] ) def test_extra_select_literal_percent_s(self): # Allow %%s to escape select clauses self.assertEqual( Note.objects.extra(select={'foo': "'%%s'"})[0].foo, '%s' ) self.assertEqual( Note.objects.extra(select={'foo': "'%%s bar %%s'"})[0].foo, '%s bar %s' ) self.assertEqual( Note.objects.extra(select={'foo': "'bar %%s'"})[0].foo, 'bar %s' ) class SelectRelatedTests(TestCase): def test_tickets_3045_3288(self): # Once upon a time, select_related() with circular relations would loop # infinitely if you forgot to specify "depth". 
        # Now we set an arbitrary default upper bound.
        self.assertQuerysetEqual(X.objects.all(), [])
        self.assertQuerysetEqual(X.objects.select_related(), [])


class SubclassFKTests(TestCase):
    def test_ticket7778(self):
        # Model subclasses could not be deleted if a nullable foreign key
        # relates to a model that relates back.
        num_celebs = Celebrity.objects.count()
        tvc = TvChef.objects.create(name="Huey")
        self.assertEqual(Celebrity.objects.count(), num_celebs + 1)
        Fan.objects.create(fan_of=tvc)
        Fan.objects.create(fan_of=tvc)
        tvc.delete()

        # The parent object should have been deleted as well.
        self.assertEqual(Celebrity.objects.count(), num_celebs)


class CustomPkTests(TestCase):
    def test_ticket7371(self):
        self.assertQuerysetEqual(Related.objects.order_by('custom'), [])


class NullableRelOrderingTests(TestCase):
    def test_ticket10028(self):
        # Ordering by a model related via a nullable relation should use
        # outer joins, so that all results are included.
        Plaything.objects.create(name="p1")
        self.assertQuerysetEqual(
            Plaything.objects.all(),
            ['<Plaything: p1>']
        )

    def test_join_already_in_query(self):
        # Ordering by a model related via a nullable relation should not
        # change the join type of already existing joins.
        Plaything.objects.create(name="p1")
        s = SingleObject.objects.create(name='s')
        r = RelatedObject.objects.create(single=s, f=1)
        Plaything.objects.create(name="p2", others=r)
        qs = Plaything.objects.all().filter(others__isnull=False).order_by('pk')
        self.assertNotIn('JOIN', str(qs.query))
        qs = Plaything.objects.all().filter(others__f__isnull=False).order_by('pk')
        self.assertIn('INNER', str(qs.query))
        qs = qs.order_by('others__single__name')
        # The ordering by others__single__name will add one new join (to
        # single) and that join must be a LEFT join. The already existing
        # join to related objects must be kept INNER. So, we have both an
        # INNER and a LEFT join in the query.
        self.assertEqual(str(qs.query).count('LEFT'), 1)
        self.assertEqual(str(qs.query).count('INNER'), 1)
        self.assertQuerysetEqual(
            qs,
            ['<Plaything: p2>']
        )


class DisjunctiveFilterTests(TestCase):
    @classmethod
    def setUpTestData(cls):
        cls.n1 = Note.objects.create(note='n1', misc='foo', id=1)
        ExtraInfo.objects.create(info='e1', note=cls.n1)

    def test_ticket7872(self):
        # Another variation on the disjunctive filtering theme. For the
        # purposes of this regression test, it's important that there is no
        # Join object related to the LeafA we create.
        LeafA.objects.create(data='first')
        self.assertQuerysetEqual(LeafA.objects.all(), ['<LeafA: first>'])
        self.assertQuerysetEqual(
            LeafA.objects.filter(Q(data='first') | Q(join__b__data='second')),
            ['<LeafA: first>']
        )

    def test_ticket8283(self):
        # Checking that applying filters after a disjunction works correctly.
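        # Added note: the trailing .filter(note=self.n1) must constrain the
        # whole ORed query, not just the last branch, for both operand
        # orders exercised below.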
self.assertQuerysetEqual( (ExtraInfo.objects.filter(note=self.n1) | ExtraInfo.objects.filter(info='e2')).filter(note=self.n1), ['<ExtraInfo: e1>'] ) self.assertQuerysetEqual( (ExtraInfo.objects.filter(info='e2') | ExtraInfo.objects.filter(note=self.n1)).filter(note=self.n1), ['<ExtraInfo: e1>'] ) class Queries6Tests(TestCase): @classmethod def setUpTestData(cls): generic = NamedCategory.objects.create(name="Generic") cls.t1 = Tag.objects.create(name='t1', category=generic) cls.t2 = Tag.objects.create(name='t2', parent=cls.t1, category=generic) cls.t3 = Tag.objects.create(name='t3', parent=cls.t1) cls.t4 = Tag.objects.create(name='t4', parent=cls.t3) cls.t5 = Tag.objects.create(name='t5', parent=cls.t3) n1 = Note.objects.create(note='n1', misc='foo', id=1) ann1 = Annotation.objects.create(name='a1', tag=cls.t1) ann1.notes.add(n1) Annotation.objects.create(name='a2', tag=cls.t4) def test_parallel_iterators(self): # Parallel iterators work. qs = Tag.objects.all() i1, i2 = iter(qs), iter(qs) self.assertEqual(repr(next(i1)), '<Tag: t1>') self.assertEqual(repr(next(i1)), '<Tag: t2>') self.assertEqual(repr(next(i2)), '<Tag: t1>') self.assertEqual(repr(next(i2)), '<Tag: t2>') self.assertEqual(repr(next(i2)), '<Tag: t3>') self.assertEqual(repr(next(i1)), '<Tag: t3>') qs = X.objects.all() self.assertFalse(qs) self.assertFalse(qs) def test_nested_queries_sql(self): # Nested queries should not evaluate the inner query as part of constructing the # SQL (so we should see a nested query here, indicated by two "SELECT" calls). qs = Annotation.objects.filter(notes__in=Note.objects.filter(note="xyzzy")) self.assertEqual( qs.query.get_compiler(qs.db).as_sql()[0].count('SELECT'), 2 ) def test_tickets_8921_9188(self): # Incorrect SQL was being generated for certain types of exclude() # queries that crossed multi-valued relations (#8921, #9188 and some # preemptively discovered cases). self.assertQuerysetEqual( PointerA.objects.filter(connection__pointerb__id=1), [] ) self.assertQuerysetEqual( PointerA.objects.exclude(connection__pointerb__id=1), [] ) self.assertQuerysetEqual( Tag.objects.exclude(children=None), ['<Tag: t1>', '<Tag: t3>'] ) # This example is tricky because the parent could be NULL, so only checking # parents with annotations omits some results (tag t1, in this case). self.assertQuerysetEqual( Tag.objects.exclude(parent__annotation__name="a1"), ['<Tag: t1>', '<Tag: t4>', '<Tag: t5>'] ) # The annotation->tag link is single values and tag->children links is # multi-valued. So we have to split the exclude filter in the middle # and then optimize the inner query without losing results. self.assertQuerysetEqual( Annotation.objects.exclude(tag__children__name="t2"), ['<Annotation: a2>'] ) # Nested queries are possible (although should be used with care, since # they have performance problems on backends like MySQL. self.assertQuerysetEqual( Annotation.objects.filter(notes__in=Note.objects.filter(note="n1")), ['<Annotation: a1>'] ) def test_ticket3739(self): # The all() method on querysets returns a copy of the queryset. 
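        # Added note: all() returns a new QuerySet wrapping a copy of the
        # underlying Query, so later modifications to one never leak into
        # the other; identity (not equality) is what's asserted below.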
q1 = Tag.objects.order_by('name') self.assertIsNot(q1, q1.all()) def test_ticket_11320(self): qs = Tag.objects.exclude(category=None).exclude(category__name='foo') self.assertEqual(str(qs.query).count(' INNER JOIN '), 1) def test_distinct_ordered_sliced_subquery_aggregation(self): self.assertEqual(Tag.objects.distinct().order_by('category__name')[:3].count(), 3) def test_multiple_columns_with_the_same_name_slice(self): self.assertEqual( list(Tag.objects.order_by('name').values_list('name', 'category__name')[:2]), [('t1', 'Generic'), ('t2', 'Generic')], ) self.assertSequenceEqual( Tag.objects.order_by('name').select_related('category')[:2], [self.t1, self.t2], ) self.assertEqual( list(Tag.objects.order_by('-name').values_list('name', 'parent__name')[:2]), [('t5', 't3'), ('t4', 't3')], ) self.assertSequenceEqual( Tag.objects.order_by('-name').select_related('parent')[:2], [self.t5, self.t4], ) class RawQueriesTests(TestCase): def setUp(self): Note.objects.create(note='n1', misc='foo', id=1) def test_ticket14729(self): # Test representation of raw query with one or few parameters passed as list query = "SELECT * FROM queries_note WHERE note = %s" params = ['n1'] qs = Note.objects.raw(query, params=params) self.assertEqual(repr(qs), "<RawQuerySet: SELECT * FROM queries_note WHERE note = n1>") query = "SELECT * FROM queries_note WHERE note = %s and misc = %s" params = ['n1', 'foo'] qs = Note.objects.raw(query, params=params) self.assertEqual(repr(qs), "<RawQuerySet: SELECT * FROM queries_note WHERE note = n1 and misc = foo>") class GeneratorExpressionTests(SimpleTestCase): def test_ticket10432(self): # Using an empty iterator as the rvalue for an "__in" # lookup is legal. self.assertCountEqual(Note.objects.filter(pk__in=iter(())), []) class ComparisonTests(TestCase): def setUp(self): self.n1 = Note.objects.create(note='n1', misc='foo', id=1) e1 = ExtraInfo.objects.create(info='e1', note=self.n1) self.a2 = Author.objects.create(name='a2', num=2002, extra=e1) def test_ticket8597(self): # Regression tests for case-insensitive comparisons Item.objects.create(name="a_b", created=datetime.datetime.now(), creator=self.a2, note=self.n1) Item.objects.create(name="x%y", created=datetime.datetime.now(), creator=self.a2, note=self.n1) self.assertQuerysetEqual( Item.objects.filter(name__iexact="A_b"), ['<Item: a_b>'] ) self.assertQuerysetEqual( Item.objects.filter(name__iexact="x%Y"), ['<Item: x%y>'] ) self.assertQuerysetEqual( Item.objects.filter(name__istartswith="A_b"), ['<Item: a_b>'] ) self.assertQuerysetEqual( Item.objects.filter(name__iendswith="A_b"), ['<Item: a_b>'] ) class ExistsSql(TestCase): def test_exists(self): with CaptureQueriesContext(connection) as captured_queries: self.assertFalse(Tag.objects.exists()) # Ok - so the exist query worked - but did it include too many columns? 
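        # Added note (hedged): exists() compiles to a minimal query that
        # selects a constant with LIMIT 1, so no model columns such as 'id'
        # or 'name' should appear in the captured SQL.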
self.assertEqual(len(captured_queries), 1) qstr = captured_queries[0]['sql'] id, name = connection.ops.quote_name('id'), connection.ops.quote_name('name') self.assertNotIn(id, qstr) self.assertNotIn(name, qstr) def test_ticket_18414(self): Article.objects.create(name='one', created=datetime.datetime.now()) Article.objects.create(name='one', created=datetime.datetime.now()) Article.objects.create(name='two', created=datetime.datetime.now()) self.assertTrue(Article.objects.exists()) self.assertTrue(Article.objects.distinct().exists()) self.assertTrue(Article.objects.distinct()[1:3].exists()) self.assertFalse(Article.objects.distinct()[1:1].exists()) @skipUnlessDBFeature('can_distinct_on_fields') def test_ticket_18414_distinct_on(self): Article.objects.create(name='one', created=datetime.datetime.now()) Article.objects.create(name='one', created=datetime.datetime.now()) Article.objects.create(name='two', created=datetime.datetime.now()) self.assertTrue(Article.objects.distinct('name').exists()) self.assertTrue(Article.objects.distinct('name')[1:2].exists()) self.assertFalse(Article.objects.distinct('name')[2:3].exists()) class QuerysetOrderedTests(unittest.TestCase): """ Tests for the Queryset.ordered attribute. """ def test_no_default_or_explicit_ordering(self): self.assertIs(Annotation.objects.all().ordered, False) def test_cleared_default_ordering(self): self.assertIs(Tag.objects.all().ordered, True) self.assertIs(Tag.objects.all().order_by().ordered, False) def test_explicit_ordering(self): self.assertIs(Annotation.objects.all().order_by('id').ordered, True) def test_empty_queryset(self): self.assertIs(Annotation.objects.none().ordered, True) def test_order_by_extra(self): self.assertIs(Annotation.objects.all().extra(order_by=['id']).ordered, True) def test_annotated_ordering(self): qs = Annotation.objects.annotate(num_notes=Count('notes')) self.assertIs(qs.ordered, False) self.assertIs(qs.order_by('num_notes').ordered, True) @skipUnlessDBFeature('allow_sliced_subqueries_with_in') class SubqueryTests(TestCase): @classmethod def setUpTestData(cls): NamedCategory.objects.create(id=1, name='first') NamedCategory.objects.create(id=2, name='second') NamedCategory.objects.create(id=3, name='third') NamedCategory.objects.create(id=4, name='fourth') def test_ordered_subselect(self): "Subselects honor any manual ordering" query = DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[0:2]) self.assertEqual(set(query.values_list('id', flat=True)), {3, 4}) query = DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[:2]) self.assertEqual(set(query.values_list('id', flat=True)), {3, 4}) query = DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[1:2]) self.assertEqual(set(query.values_list('id', flat=True)), {3}) query = DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[2:]) self.assertEqual(set(query.values_list('id', flat=True)), {1, 2}) def test_slice_subquery_and_query(self): """ Slice a query that has a sliced subquery """ query = DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[0:2])[0:2] self.assertEqual({x.id for x in query}, {3, 4}) query = DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[1:3])[1:3] self.assertEqual({x.id for x in query}, {3}) query = DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[2:])[1:] self.assertEqual({x.id for x in query}, {2}) def test_related_sliced_subquery(self): """ Related objects constraints can safely contain 
        sliced subqueries. refs #22434
        """
        generic = NamedCategory.objects.create(id=5, name="Generic")
        t1 = Tag.objects.create(name='t1', category=generic)
        t2 = Tag.objects.create(name='t2', category=generic)
        ManagedModel.objects.create(data='mm1', tag=t1, public=True)
        mm2 = ManagedModel.objects.create(data='mm2', tag=t2, public=True)

        query = ManagedModel.normal_manager.filter(
            tag__in=Tag.objects.order_by('-id')[:1]
        )
        self.assertEqual({x.id for x in query}, {mm2.id})

    def test_sliced_delete(self):
        "Delete queries can safely contain sliced subqueries"
        DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[0:1]).delete()
        self.assertEqual(set(DumbCategory.objects.values_list('id', flat=True)), {1, 2, 3})

        DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[1:2]).delete()
        self.assertEqual(set(DumbCategory.objects.values_list('id', flat=True)), {1, 3})

        DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[1:]).delete()
        self.assertEqual(set(DumbCategory.objects.values_list('id', flat=True)), {3})

    def test_distinct_ordered_sliced_subquery(self):
        # Implicit values('id').
        self.assertSequenceEqual(
            NamedCategory.objects.filter(
                id__in=NamedCategory.objects.distinct().order_by('name')[0:2],
            ).order_by('name').values_list('name', flat=True),
            ['first', 'fourth']
        )
        # Explicit values('id').
        self.assertSequenceEqual(
            NamedCategory.objects.filter(
                id__in=NamedCategory.objects.distinct().order_by('-name').values('id')[0:2],
            ).order_by('name').values_list('name', flat=True),
            ['second', 'third']
        )
        # Annotated value.
        self.assertSequenceEqual(
            DumbCategory.objects.filter(
                id__in=DumbCategory.objects.annotate(
                    double_id=F('id') * 2
                ).order_by('id').distinct().values('double_id')[0:2],
            ).order_by('id').values_list('id', flat=True),
            [2, 4]
        )


@skipUnlessDBFeature('allow_sliced_subqueries_with_in')
class QuerySetBitwiseOperationTests(TestCase):
    @classmethod
    def setUpTestData(cls):
        school = School.objects.create()
        cls.room_1 = Classroom.objects.create(school=school, has_blackboard=False, name='Room 1')
        cls.room_2 = Classroom.objects.create(school=school, has_blackboard=True, name='Room 2')
        cls.room_3 = Classroom.objects.create(school=school, has_blackboard=True, name='Room 3')
        cls.room_4 = Classroom.objects.create(school=school, has_blackboard=False, name='Room 4')

    def test_or_with_rhs_slice(self):
        qs1 = Classroom.objects.filter(has_blackboard=True)
        qs2 = Classroom.objects.filter(has_blackboard=False)[:1]
        self.assertCountEqual(qs1 | qs2, [self.room_1, self.room_2, self.room_3])

    def test_or_with_lhs_slice(self):
        qs1 = Classroom.objects.filter(has_blackboard=True)[:1]
        qs2 = Classroom.objects.filter(has_blackboard=False)
        self.assertCountEqual(qs1 | qs2, [self.room_1, self.room_2, self.room_4])

    def test_or_with_both_slice(self):
        qs1 = Classroom.objects.filter(has_blackboard=False)[:1]
        qs2 = Classroom.objects.filter(has_blackboard=True)[:1]
        self.assertCountEqual(qs1 | qs2, [self.room_1, self.room_2])

    def test_or_with_both_slice_and_ordering(self):
        qs1 = Classroom.objects.filter(has_blackboard=False).order_by('-pk')[:1]
        qs2 = Classroom.objects.filter(has_blackboard=True).order_by('-name')[:1]
        self.assertCountEqual(qs1 | qs2, [self.room_3, self.room_4])


class CloneTests(TestCase):
    def test_evaluated_queryset_as_argument(self):
        "#13227 -- If a queryset is already evaluated, it can still be used as a query arg"
        n = Note(note='Test1', misc='misc')
        n.save()
        e = ExtraInfo(info='good', note=n)
        e.save()

        n_list = Note.objects.all()
        # Evaluate the Note queryset, populating the query
        # cache.
        list(n_list)
        # Use the note queryset in a query, and evaluate
        # that query in a way that involves cloning.
        self.assertEqual(ExtraInfo.objects.filter(note__in=n_list)[0].info, 'good')

    def test_no_model_options_cloning(self):
        """
        Cloning a queryset does not get out of hand. While complete
        testing is impossible, this is a sanity check against invalid use of
        deepcopy. refs #16759.
        """
        opts_class = type(Note._meta)
        note_deepcopy = getattr(opts_class, "__deepcopy__", None)
        opts_class.__deepcopy__ = lambda obj, memo: self.fail("Model options shouldn't be cloned.")
        try:
            Note.objects.filter(pk__lte=F('pk') + 1).all()
        finally:
            if note_deepcopy is None:
                delattr(opts_class, "__deepcopy__")
            else:
                opts_class.__deepcopy__ = note_deepcopy

    def test_no_fields_cloning(self):
        """
        Cloning a queryset does not get out of hand. While complete
        testing is impossible, this is a sanity check against invalid use of
        deepcopy. refs #16759.
        """
        opts_class = type(Note._meta.get_field("misc"))
        note_deepcopy = getattr(opts_class, "__deepcopy__", None)
        opts_class.__deepcopy__ = lambda obj, memo: self.fail("Model fields shouldn't be cloned")
        try:
            Note.objects.filter(note=F('misc')).all()
        finally:
            if note_deepcopy is None:
                delattr(opts_class, "__deepcopy__")
            else:
                opts_class.__deepcopy__ = note_deepcopy


class EmptyQuerySetTests(SimpleTestCase):
    def test_emptyqueryset_values(self):
        # #14366 -- Calling .values() on an empty QuerySet and then cloning
        # that should not cause an error
        self.assertCountEqual(Number.objects.none().values('num').order_by('num'), [])

    def test_values_subquery(self):
        self.assertCountEqual(Number.objects.filter(pk__in=Number.objects.none().values('pk')), [])
        self.assertCountEqual(Number.objects.filter(pk__in=Number.objects.none().values_list('pk')), [])

    def test_ticket_19151(self):
        # #19151 -- Calling .values() or .values_list() on an empty QuerySet
        # should return an empty QuerySet and not cause an error.
        q = Author.objects.none()
        self.assertCountEqual(q.values(), [])
        self.assertCountEqual(q.values_list(), [])


class ValuesQuerysetTests(TestCase):
    @classmethod
    def setUpTestData(cls):
        Number.objects.create(num=72)

    def test_flat_values_list(self):
        qs = Number.objects.values_list("num")
        qs = qs.values_list("num", flat=True)
        self.assertSequenceEqual(qs, [72])

    def test_extra_values(self):
        # testing for ticket 14930 issues
        qs = Number.objects.extra(select={'value_plus_x': 'num+%s', 'value_minus_x': 'num-%s'},
                                  select_params=(1, 2))
        qs = qs.order_by('value_minus_x')
        qs = qs.values('num')
        self.assertSequenceEqual(qs, [{'num': 72}])

    def test_extra_values_order_twice(self):
        # testing for ticket 14930 issues
        qs = Number.objects.extra(select={'value_plus_one': 'num+1', 'value_minus_one': 'num-1'})
        qs = qs.order_by('value_minus_one').order_by('value_plus_one')
        qs = qs.values('num')
        self.assertSequenceEqual(qs, [{'num': 72}])

    def test_extra_values_order_multiple(self):
        # Postgres doesn't allow constants in order by, so check for that.
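        # (Simply evaluating the query below is the regression check; some
        # backends reject constant expressions in ORDER BY outright.)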
        qs = Number.objects.extra(select={
            'value_plus_one': 'num+1',
            'value_minus_one': 'num-1',
            'constant_value': '1'
        })
        qs = qs.order_by('value_plus_one', 'value_minus_one', 'constant_value')
        qs = qs.values('num')
        self.assertSequenceEqual(qs, [{'num': 72}])

    def test_extra_values_order_in_extra(self):
        # testing for ticket 14930 issues
        qs = Number.objects.extra(
            select={'value_plus_one': 'num+1', 'value_minus_one': 'num-1'},
            order_by=['value_minus_one'],
        )
        qs = qs.values('num')
        # The original version of this test built the queryset but asserted
        # nothing; check the expected row so the test actually verifies it.
        self.assertSequenceEqual(qs, [{'num': 72}])

    def test_extra_select_params_values_order_in_extra(self):
        # testing for 23259 issue
        qs = Number.objects.extra(
            select={'value_plus_x': 'num+%s'},
            select_params=[1],
            order_by=['value_plus_x'],
        )
        qs = qs.filter(num=72)
        qs = qs.values('num')
        self.assertSequenceEqual(qs, [{'num': 72}])

    def test_extra_multiple_select_params_values_order_by(self):
        # testing for 23259 issue
        qs = Number.objects.extra(select={'value_plus_x': 'num+%s', 'value_minus_x': 'num-%s'},
                                  select_params=(72, 72))
        qs = qs.order_by('value_minus_x')
        qs = qs.filter(num=1)
        qs = qs.values('num')
        self.assertSequenceEqual(qs, [])

    def test_extra_values_list(self):
        # testing for ticket 14930 issues
        qs = Number.objects.extra(select={'value_plus_one': 'num+1'})
        qs = qs.order_by('value_plus_one')
        qs = qs.values_list('num')
        self.assertSequenceEqual(qs, [(72,)])

    def test_flat_extra_values_list(self):
        # testing for ticket 14930 issues
        qs = Number.objects.extra(select={'value_plus_one': 'num+1'})
        qs = qs.order_by('value_plus_one')
        qs = qs.values_list('num', flat=True)
        self.assertSequenceEqual(qs, [72])

    def test_field_error_values_list(self):
        # see #23443
        msg = "Cannot resolve keyword %r into field. Join on 'name' not permitted." % 'foo'
        with self.assertRaisesMessage(FieldError, msg):
            Tag.objects.values_list('name__foo')

    def test_named_values_list_flat(self):
        msg = "'flat' and 'named' can't be used together."
        with self.assertRaisesMessage(TypeError, msg):
            Number.objects.values_list('num', flat=True, named=True)

    def test_named_values_list_bad_field_name(self):
        msg = "Type names and field names must be valid identifiers: '1'"
        with self.assertRaisesMessage(ValueError, msg):
            Number.objects.extra(select={'1': 'num+1'}).values_list('1', named=True).first()

    def test_named_values_list_with_fields(self):
        qs = Number.objects.extra(select={'num2': 'num+1'}).annotate(Count('id'))
        values = qs.values_list('num', 'num2', named=True).first()
        self.assertEqual(type(values).__name__, 'Row')
        self.assertEqual(values._fields, ('num', 'num2'))
        self.assertEqual(values.num, 72)
        self.assertEqual(values.num2, 73)

    def test_named_values_list_without_fields(self):
        qs = Number.objects.extra(select={'num2': 'num+1'}).annotate(Count('id'))
        values = qs.values_list(named=True).first()
        self.assertEqual(type(values).__name__, 'Row')
        self.assertEqual(values._fields, ('num2', 'id', 'num', 'other_num', 'id__count'))
        self.assertEqual(values.num, 72)
        self.assertEqual(values.num2, 73)
        self.assertEqual(values.id__count, 1)

    def test_named_values_list_expression_with_default_alias(self):
        expr = Count('id')
        values = Number.objects.annotate(id__count1=expr).values_list(expr, 'id__count1', named=True).first()
        self.assertEqual(values._fields, ('id__count2', 'id__count1'))

    def test_named_values_list_expression(self):
        expr = F('num') + 1
        qs = Number.objects.annotate(combinedexpression1=expr).values_list(expr, 'combinedexpression1', named=True)
        values = qs.first()
        self.assertEqual(values._fields, ('combinedexpression2', 'combinedexpression1'))


class QuerySetSupportsPythonIdioms(TestCase):
    @classmethod
    def setUpTestData(cls):
        some_date = datetime.datetime(2014, 5, 16, 12, 1)
        for i in range(1, 8):
            Article.objects.create(
                name="Article {}".format(i), created=some_date)

    def get_ordered_articles(self):
        return Article.objects.all().order_by('name')

    def test_can_get_items_using_index_and_slice_notation(self):
        self.assertEqual(self.get_ordered_articles()[0].name, 'Article 1')
        self.assertQuerysetEqual(
            self.get_ordered_articles()[1:3],
            ["<Article: Article 2>", "<Article: Article 3>"]
        )

    def test_slicing_with_steps_can_be_used(self):
        self.assertQuerysetEqual(
            self.get_ordered_articles()[::2], [
                "<Article: Article 1>",
                "<Article: Article 3>",
                "<Article: Article 5>",
                "<Article: Article 7>"
            ]
        )

    def test_slicing_without_step_is_lazy(self):
        with self.assertNumQueries(0):
            self.get_ordered_articles()[0:5]

    def test_slicing_with_steps_is_not_lazy(self):
        with self.assertNumQueries(1):
            self.get_ordered_articles()[0:5:3]

    def test_slicing_can_slice_again_after_slicing(self):
        self.assertQuerysetEqual(
            self.get_ordered_articles()[0:5][0:2],
            ["<Article: Article 1>", "<Article: Article 2>"]
        )
        self.assertQuerysetEqual(self.get_ordered_articles()[0:5][4:], ["<Article: Article 5>"])
        self.assertQuerysetEqual(self.get_ordered_articles()[0:5][5:], [])

        # Some more tests!
        self.assertQuerysetEqual(
            self.get_ordered_articles()[2:][0:2],
            ["<Article: Article 3>", "<Article: Article 4>"]
        )
        self.assertQuerysetEqual(
            self.get_ordered_articles()[2:][:2],
            ["<Article: Article 3>", "<Article: Article 4>"]
        )
        self.assertQuerysetEqual(self.get_ordered_articles()[2:][2:3], ["<Article: Article 5>"])

        # Using an offset without a limit is also possible.
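        # (Backends that require LIMIT together with OFFSET substitute a
        # sentinel "no limit" value here; SQLite, for example, uses
        # LIMIT -1 OFFSET 5.)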
        self.assertQuerysetEqual(
            self.get_ordered_articles()[5:],
            ["<Article: Article 6>", "<Article: Article 7>"]
        )

    def test_slicing_cannot_filter_queryset_once_sliced(self):
        with self.assertRaisesMessage(AssertionError, "Cannot filter a query once a slice has been taken."):
            Article.objects.all()[0:5].filter(id=1)

    def test_slicing_cannot_reorder_queryset_once_sliced(self):
        with self.assertRaisesMessage(AssertionError, "Cannot reorder a query once a slice has been taken."):
            Article.objects.all()[0:5].order_by('id')

    def test_slicing_cannot_combine_queries_once_sliced(self):
        with self.assertRaisesMessage(AssertionError, "Cannot combine queries once a slice has been taken."):
            Article.objects.all()[0:1] & Article.objects.all()[4:5]

    def test_slicing_negative_indexing_not_supported_for_single_element(self):
        """hint: inverting your ordering might do what you need"""
        with self.assertRaisesMessage(AssertionError, "Negative indexing is not supported."):
            Article.objects.all()[-1]

    def test_slicing_negative_indexing_not_supported_for_range(self):
        """hint: inverting your ordering might do what you need"""
        with self.assertRaisesMessage(AssertionError, "Negative indexing is not supported."):
            Article.objects.all()[0:-5]

    def test_invalid_index(self):
        msg = 'QuerySet indices must be integers or slices, not str.'
        with self.assertRaisesMessage(TypeError, msg):
            Article.objects.all()['foo']

    def test_can_get_number_of_items_in_queryset_using_standard_len(self):
        self.assertEqual(len(Article.objects.filter(name__exact='Article 1')), 1)

    def test_can_combine_queries_using_and_and_or_operators(self):
        s1 = Article.objects.filter(name__exact='Article 1')
        s2 = Article.objects.filter(name__exact='Article 2')
        self.assertQuerysetEqual(
            (s1 | s2).order_by('name'),
            ["<Article: Article 1>", "<Article: Article 2>"]
        )
        self.assertQuerysetEqual(s1 & s2, [])


class WeirdQuerysetSlicingTests(TestCase):
    @classmethod
    def setUpTestData(cls):
        Number.objects.create(num=1)
        Number.objects.create(num=2)

        Article.objects.create(name='one', created=datetime.datetime.now())
        Article.objects.create(name='two', created=datetime.datetime.now())
        Article.objects.create(name='three', created=datetime.datetime.now())
        Article.objects.create(name='four', created=datetime.datetime.now())

        food = Food.objects.create(name='spam')
        Eaten.objects.create(meal='spam with eggs', food=food)

    def test_tickets_7698_10202(self):
        # People like to slice with '0' as the high-water mark.
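        # (An empty slice such as [0:0] is resolved without querying the
        # database at all, as test_empty_resultset_sql below verifies.)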
        self.assertQuerysetEqual(Article.objects.all()[0:0], [])
        self.assertQuerysetEqual(Article.objects.all()[0:0][:10], [])
        self.assertEqual(Article.objects.all()[:0].count(), 0)
        with self.assertRaisesMessage(TypeError, 'Cannot reverse a query once a slice has been taken.'):
            Article.objects.all()[:0].latest('created')

    def test_empty_resultset_sql(self):
        # ticket #12192
        self.assertNumQueries(0, lambda: list(Number.objects.all()[1:1]))

    def test_empty_sliced_subquery(self):
        self.assertEqual(Eaten.objects.filter(food__in=Food.objects.all()[0:0]).count(), 0)

    def test_empty_sliced_subquery_exclude(self):
        self.assertEqual(Eaten.objects.exclude(food__in=Food.objects.all()[0:0]).count(), 1)

    def test_zero_length_values_slicing(self):
        n = 42
        with self.assertNumQueries(0):
            self.assertQuerysetEqual(Article.objects.values()[n:n], [])
            self.assertQuerysetEqual(Article.objects.values_list()[n:n], [])


class EscapingTests(TestCase):
    def test_ticket_7302(self):
        # Reserved names are appropriately escaped
        ReservedName.objects.create(name='a', order=42)
        ReservedName.objects.create(name='b', order=37)
        self.assertQuerysetEqual(
            ReservedName.objects.all().order_by('order'),
            ['<ReservedName: b>', '<ReservedName: a>']
        )
        self.assertQuerysetEqual(
            ReservedName.objects.extra(select={'stuff': 'name'}, order_by=('order', 'stuff')),
            ['<ReservedName: b>', '<ReservedName: a>']
        )


class ToFieldTests(TestCase):
    def test_in_query(self):
        apple = Food.objects.create(name="apple")
        pear = Food.objects.create(name="pear")
        lunch = Eaten.objects.create(food=apple, meal="lunch")
        dinner = Eaten.objects.create(food=pear, meal="dinner")

        self.assertEqual(
            set(Eaten.objects.filter(food__in=[apple, pear])),
            {lunch, dinner},
        )

    def test_in_subquery(self):
        apple = Food.objects.create(name="apple")
        lunch = Eaten.objects.create(food=apple, meal="lunch")
        self.assertEqual(
            set(Eaten.objects.filter(food__in=Food.objects.filter(name='apple'))),
            {lunch}
        )
        self.assertEqual(
            set(Eaten.objects.filter(food__in=Food.objects.filter(name='apple').values('eaten__meal'))),
            set()
        )
        self.assertEqual(
            set(Food.objects.filter(eaten__in=Eaten.objects.filter(meal='lunch'))),
            {apple}
        )

    def test_nested_in_subquery(self):
        extra = ExtraInfo.objects.create()
        author = Author.objects.create(num=42, extra=extra)
        report = Report.objects.create(creator=author)
        comment = ReportComment.objects.create(report=report)
        comments = ReportComment.objects.filter(
            report__in=Report.objects.filter(
                creator__in=extra.author_set.all(),
            ),
        )
        self.assertSequenceEqual(comments, [comment])

    def test_reverse_in(self):
        apple = Food.objects.create(name="apple")
        pear = Food.objects.create(name="pear")
        lunch_apple = Eaten.objects.create(food=apple, meal="lunch")
        lunch_pear = Eaten.objects.create(food=pear, meal="dinner")

        self.assertEqual(
            set(Food.objects.filter(eaten__in=[lunch_apple, lunch_pear])),
            {apple, pear}
        )

    def test_single_object(self):
        apple = Food.objects.create(name="apple")
        lunch = Eaten.objects.create(food=apple, meal="lunch")
        dinner = Eaten.objects.create(food=apple, meal="dinner")

        self.assertEqual(
            set(Eaten.objects.filter(food=apple)),
            {lunch, dinner}
        )

    def test_single_object_reverse(self):
        apple = Food.objects.create(name="apple")
        lunch = Eaten.objects.create(food=apple, meal="lunch")

        self.assertEqual(
            set(Food.objects.filter(eaten=lunch)),
            {apple}
        )

    def test_recursive_fk(self):
        node1 = Node.objects.create(num=42)
        node2 = Node.objects.create(num=1, parent=node1)

        self.assertEqual(
            list(Node.objects.filter(parent=node1)),
            [node2]
        )

    def test_recursive_fk_reverse(self):
        node1 = Node.objects.create(num=42)
        node2 = Node.objects.create(num=1, parent=node1)

        self.assertEqual(
            list(Node.objects.filter(node=node2)),
            [node1]
        )


class IsNullTests(TestCase):
    def test_primary_key(self):
        custom = CustomPk.objects.create(name='pk')
        null = Related.objects.create()
        notnull = Related.objects.create(custom=custom)
        self.assertSequenceEqual(Related.objects.filter(custom__isnull=False), [notnull])
        self.assertSequenceEqual(Related.objects.filter(custom__isnull=True), [null])

    def test_to_field(self):
        apple = Food.objects.create(name="apple")
        Eaten.objects.create(food=apple, meal="lunch")
        Eaten.objects.create(meal="lunch")
        self.assertQuerysetEqual(
            Eaten.objects.filter(food__isnull=False),
            ['<Eaten: apple at lunch>']
        )
        self.assertQuerysetEqual(
            Eaten.objects.filter(food__isnull=True),
            ['<Eaten: None at lunch>']
        )


class ConditionalTests(TestCase):
    """Tests whose execution depends on different environment conditions like
    Python version or DB backend features"""
    @classmethod
    def setUpTestData(cls):
        generic = NamedCategory.objects.create(name="Generic")
        t1 = Tag.objects.create(name='t1', category=generic)
        Tag.objects.create(name='t2', parent=t1, category=generic)
        t3 = Tag.objects.create(name='t3', parent=t1)
        Tag.objects.create(name='t4', parent=t3)
        Tag.objects.create(name='t5', parent=t3)

    def test_infinite_loop(self):
        # If you're not careful, it's possible to introduce infinite loops via
        # default ordering on foreign keys in a cycle. We detect that.
        with self.assertRaisesMessage(FieldError, 'Infinite loop caused by ordering.'):
            list(LoopX.objects.all())  # Force queryset evaluation with list()
        with self.assertRaisesMessage(FieldError, 'Infinite loop caused by ordering.'):
            list(LoopZ.objects.all())  # Force queryset evaluation with list()

        # Note that this doesn't cause an infinite loop, since the default
        # ordering on the Tag model is empty (and thus defaults to using "id"
        # for the related field).
        self.assertEqual(len(Tag.objects.order_by('parent')), 5)

        # ... but you can still order in a non-recursive fashion among linked
        # fields (the previous test failed because the default ordering was
        # recursive).
        self.assertQuerysetEqual(
            LoopX.objects.all().order_by('y__x__y__x__id'),
            []
        )

    # When grouping without specifying ordering, we add an explicit "ORDER BY NULL"
    # portion in MySQL to prevent unnecessary sorting.
    @skipUnlessDBFeature('requires_explicit_null_ordering_when_grouping')
    def test_null_ordering_added(self):
        query = Tag.objects.values_list('parent_id', flat=True).order_by().query
        query.group_by = ['parent_id']
        sql = query.get_compiler(DEFAULT_DB_ALIAS).as_sql()[0]
        fragment = "ORDER BY "
        pos = sql.find(fragment)
        self.assertEqual(sql.find(fragment, pos + 1), -1)
        self.assertEqual(sql.find("NULL", pos + len(fragment)), pos + len(fragment))

    def test_in_list_limit(self):
        # The "in" lookup works with lists of 1000 items or more.
        # The numbers amount is picked to force three different IN batches
        # for Oracle, yet to be less than 2100 parameter limit for MSSQL.
        numbers = list(range(2050))
        max_query_params = connection.features.max_query_params
        if max_query_params is None or max_query_params >= len(numbers):
            Number.objects.bulk_create(Number(num=num) for num in numbers)
            for number in [1000, 1001, 2000, len(numbers)]:
                with self.subTest(number=number):
                    self.assertEqual(Number.objects.filter(num__in=numbers[:number]).count(), number)


class UnionTests(TestCase):
    """
    Tests for the union of two querysets. Bug #12252.
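
    check_union() asserts, for both argument orders, that
    filter(Q1) | filter(Q2) produces the same set of rows as filter(Q1 | Q2).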
""" @classmethod def setUpTestData(cls): objectas = [] objectbs = [] objectcs = [] a_info = ['one', 'two', 'three'] for name in a_info: o = ObjectA(name=name) o.save() objectas.append(o) b_info = [('un', 1, objectas[0]), ('deux', 2, objectas[0]), ('trois', 3, objectas[2])] for name, number, objecta in b_info: o = ObjectB(name=name, num=number, objecta=objecta) o.save() objectbs.append(o) c_info = [('ein', objectas[2], objectbs[2]), ('zwei', objectas[1], objectbs[1])] for name, objecta, objectb in c_info: o = ObjectC(name=name, objecta=objecta, objectb=objectb) o.save() objectcs.append(o) def check_union(self, model, Q1, Q2): filter = model.objects.filter self.assertEqual(set(filter(Q1) | filter(Q2)), set(filter(Q1 | Q2))) self.assertEqual(set(filter(Q2) | filter(Q1)), set(filter(Q1 | Q2))) def test_A_AB(self): Q1 = Q(name='two') Q2 = Q(objectb__name='deux') self.check_union(ObjectA, Q1, Q2) def test_A_AB2(self): Q1 = Q(name='two') Q2 = Q(objectb__name='deux', objectb__num=2) self.check_union(ObjectA, Q1, Q2) def test_AB_ACB(self): Q1 = Q(objectb__name='deux') Q2 = Q(objectc__objectb__name='deux') self.check_union(ObjectA, Q1, Q2) def test_BAB_BAC(self): Q1 = Q(objecta__objectb__name='deux') Q2 = Q(objecta__objectc__name='ein') self.check_union(ObjectB, Q1, Q2) def test_BAB_BACB(self): Q1 = Q(objecta__objectb__name='deux') Q2 = Q(objecta__objectc__objectb__name='trois') self.check_union(ObjectB, Q1, Q2) def test_BA_BCA__BAB_BAC_BCA(self): Q1 = Q(objecta__name='one', objectc__objecta__name='two') Q2 = Q(objecta__objectc__name='ein', objectc__objecta__name='three', objecta__objectb__name='trois') self.check_union(ObjectB, Q1, Q2) class DefaultValuesInsertTest(TestCase): def test_no_extra_params(self): """ Can create an instance of a model with only the PK field (#17056)." 
""" DumbCategory.objects.create() class ExcludeTests(TestCase): @classmethod def setUpTestData(cls): f1 = Food.objects.create(name='apples') Food.objects.create(name='oranges') Eaten.objects.create(food=f1, meal='dinner') j1 = Job.objects.create(name='Manager') r1 = Responsibility.objects.create(description='Playing golf') j2 = Job.objects.create(name='Programmer') r2 = Responsibility.objects.create(description='Programming') JobResponsibilities.objects.create(job=j1, responsibility=r1) JobResponsibilities.objects.create(job=j2, responsibility=r2) def test_to_field(self): self.assertQuerysetEqual( Food.objects.exclude(eaten__meal='dinner'), ['<Food: oranges>']) self.assertQuerysetEqual( Job.objects.exclude(responsibilities__description='Playing golf'), ['<Job: Programmer>']) self.assertQuerysetEqual( Responsibility.objects.exclude(jobs__name='Manager'), ['<Responsibility: Programming>']) def test_ticket14511(self): alex = Person.objects.get_or_create(name='Alex')[0] jane = Person.objects.get_or_create(name='Jane')[0] oracle = Company.objects.get_or_create(name='Oracle')[0] google = Company.objects.get_or_create(name='Google')[0] microsoft = Company.objects.get_or_create(name='Microsoft')[0] intel = Company.objects.get_or_create(name='Intel')[0] def employ(employer, employee, title): Employment.objects.get_or_create(employee=employee, employer=employer, title=title) employ(oracle, alex, 'Engineer') employ(oracle, alex, 'Developer') employ(google, alex, 'Engineer') employ(google, alex, 'Manager') employ(microsoft, alex, 'Manager') employ(intel, alex, 'Manager') employ(microsoft, jane, 'Developer') employ(intel, jane, 'Manager') alex_tech_employers = alex.employers.filter( employment__title__in=('Engineer', 'Developer')).distinct().order_by('name') self.assertSequenceEqual(alex_tech_employers, [google, oracle]) alex_nontech_employers = alex.employers.exclude( employment__title__in=('Engineer', 'Developer')).distinct().order_by('name') self.assertSequenceEqual(alex_nontech_employers, [google, intel, microsoft]) def test_exclude_reverse_fk_field_ref(self): tag = Tag.objects.create() Note.objects.create(tag=tag, note='note') annotation = Annotation.objects.create(name='annotation', tag=tag) self.assertEqual(Annotation.objects.exclude(tag__note__note=F('name')).get(), annotation) def test_exclude_with_circular_fk_relation(self): self.assertEqual(ObjectB.objects.exclude(objecta__objectb__name=F('name')).count(), 0) class ExcludeTest17600(TestCase): """ Some regressiontests for ticket #17600. Some of these likely duplicate other existing tests. """ @classmethod def setUpTestData(cls): # Create a few Orders. 
        cls.o1 = Order.objects.create(pk=1)
        cls.o2 = Order.objects.create(pk=2)
        cls.o3 = Order.objects.create(pk=3)

        # Create some OrderItems for the first order with homogeneous
        # status_id values
        cls.oi1 = OrderItem.objects.create(order=cls.o1, status=1)
        cls.oi2 = OrderItem.objects.create(order=cls.o1, status=1)
        cls.oi3 = OrderItem.objects.create(order=cls.o1, status=1)

        # Create some OrderItems for the second order with heterogeneous
        # status_id values
        cls.oi4 = OrderItem.objects.create(order=cls.o2, status=1)
        cls.oi5 = OrderItem.objects.create(order=cls.o2, status=2)
        cls.oi6 = OrderItem.objects.create(order=cls.o2, status=3)

        # Create some OrderItems for the third order with heterogeneous
        # status_id values
        cls.oi7 = OrderItem.objects.create(order=cls.o3, status=2)
        cls.oi8 = OrderItem.objects.create(order=cls.o3, status=3)
        cls.oi9 = OrderItem.objects.create(order=cls.o3, status=4)

    def test_exclude_plain(self):
        """
        This should exclude Orders which have some items with status 1
        """
        self.assertQuerysetEqual(
            Order.objects.exclude(items__status=1),
            ['<Order: 3>'])

    def test_exclude_plain_distinct(self):
        """
        This should exclude Orders which have some items with status 1
        """
        self.assertQuerysetEqual(
            Order.objects.exclude(items__status=1).distinct(),
            ['<Order: 3>'])

    def test_exclude_with_q_object_distinct(self):
        """
        This should exclude Orders which have some items with status 1
        """
        self.assertQuerysetEqual(
            Order.objects.exclude(Q(items__status=1)).distinct(),
            ['<Order: 3>'])

    def test_exclude_with_q_object_no_distinct(self):
        """
        This should exclude Orders which have some items with status 1
        """
        self.assertQuerysetEqual(
            Order.objects.exclude(Q(items__status=1)),
            ['<Order: 3>'])

    def test_exclude_with_q_is_equal_to_plain_exclude(self):
        """
        Using exclude(condition) and exclude(Q(condition)) should
        yield the same QuerySet
        """
        self.assertEqual(
            list(Order.objects.exclude(items__status=1).distinct()),
            list(Order.objects.exclude(Q(items__status=1)).distinct()))

    def test_exclude_with_q_is_equal_to_plain_exclude_variation(self):
        """
        Using exclude(condition) and exclude(Q(condition)) should
        yield the same QuerySet
        """
        self.assertEqual(
            list(Order.objects.exclude(items__status=1)),
            list(Order.objects.exclude(Q(items__status=1)).distinct()))

    @unittest.expectedFailure
    def test_only_orders_with_all_items_having_status_1(self):
        """
        This should only return orders having ALL items set to status 1, or
        orders without any items at all. The correct way to write this query
        in SQL seems to be using two nested subqueries.
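
        A hypothetical sketch of such SQL (illustrative only, not the ORM's
        actual output):

            SELECT * FROM queries_order o
            WHERE NOT EXISTS (
                SELECT 1 FROM queries_orderitem i
                WHERE i.order_id = o.id AND i.status <> 1
            )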
""" self.assertQuerysetEqual( Order.objects.exclude(~Q(items__status=1)).distinct(), ['<Order: 1>']) class Exclude15786(TestCase): """Regression test for #15786""" def test_ticket15786(self): c1 = SimpleCategory.objects.create(name='c1') c2 = SimpleCategory.objects.create(name='c2') OneToOneCategory.objects.create(category=c1) OneToOneCategory.objects.create(category=c2) rel = CategoryRelationship.objects.create(first=c1, second=c2) self.assertEqual( CategoryRelationship.objects.exclude( first__onetoonecategory=F('second__onetoonecategory') ).get(), rel ) class NullInExcludeTest(TestCase): @classmethod def setUpTestData(cls): NullableName.objects.create(name='i1') NullableName.objects.create() def test_null_in_exclude_qs(self): none_val = '' if connection.features.interprets_empty_strings_as_nulls else None self.assertQuerysetEqual( NullableName.objects.exclude(name__in=[]), ['i1', none_val], attrgetter('name')) self.assertQuerysetEqual( NullableName.objects.exclude(name__in=['i1']), [none_val], attrgetter('name')) self.assertQuerysetEqual( NullableName.objects.exclude(name__in=['i3']), ['i1', none_val], attrgetter('name')) inner_qs = NullableName.objects.filter(name='i1').values_list('name') self.assertQuerysetEqual( NullableName.objects.exclude(name__in=inner_qs), [none_val], attrgetter('name')) # The inner queryset wasn't executed - it should be turned # into subquery above self.assertIs(inner_qs._result_cache, None) @unittest.expectedFailure def test_col_not_in_list_containing_null(self): """ The following case is not handled properly because SQL's COL NOT IN (list containing null) handling is too weird to abstract away. """ self.assertQuerysetEqual( NullableName.objects.exclude(name__in=[None]), ['i1'], attrgetter('name')) def test_double_exclude(self): self.assertEqual( list(NullableName.objects.filter(~~Q(name='i1'))), list(NullableName.objects.filter(Q(name='i1')))) self.assertNotIn( 'IS NOT NULL', str(NullableName.objects.filter(~~Q(name='i1')).query)) class EmptyStringsAsNullTest(TestCase): """ Filtering on non-null character fields works as expected. The reason for these tests is that Oracle treats '' as NULL, and this can cause problems in query construction. Refs #17957. """ @classmethod def setUpTestData(cls): cls.nc = NamedCategory.objects.create(name='') def test_direct_exclude(self): self.assertQuerysetEqual( NamedCategory.objects.exclude(name__in=['nonexistent']), [self.nc.pk], attrgetter('pk') ) def test_joined_exclude(self): self.assertQuerysetEqual( DumbCategory.objects.exclude(namedcategory__name__in=['nonexistent']), [self.nc.pk], attrgetter('pk') ) def test_21001(self): foo = NamedCategory.objects.create(name='foo') self.assertQuerysetEqual( NamedCategory.objects.exclude(name=''), [foo.pk], attrgetter('pk') ) class ProxyQueryCleanupTest(TestCase): def test_evaluated_proxy_count(self): """ Generating the query string doesn't alter the query's state in irreversible ways. Refs #18248. 
""" ProxyCategory.objects.create() qs = ProxyCategory.objects.all() self.assertEqual(qs.count(), 1) str(qs.query) self.assertEqual(qs.count(), 1) class WhereNodeTest(SimpleTestCase): class DummyNode: def as_sql(self, compiler, connection): return 'dummy', [] class MockCompiler: def compile(self, node): return node.as_sql(self, connection) def __call__(self, name): return connection.ops.quote_name(name) def test_empty_full_handling_conjunction(self): compiler = WhereNodeTest.MockCompiler() w = WhereNode(children=[NothingNode()]) with self.assertRaises(EmptyResultSet): w.as_sql(compiler, connection) w.negate() self.assertEqual(w.as_sql(compiler, connection), ('', [])) w = WhereNode(children=[self.DummyNode(), self.DummyNode()]) self.assertEqual(w.as_sql(compiler, connection), ('(dummy AND dummy)', [])) w.negate() self.assertEqual(w.as_sql(compiler, connection), ('NOT (dummy AND dummy)', [])) w = WhereNode(children=[NothingNode(), self.DummyNode()]) with self.assertRaises(EmptyResultSet): w.as_sql(compiler, connection) w.negate() self.assertEqual(w.as_sql(compiler, connection), ('', [])) def test_empty_full_handling_disjunction(self): compiler = WhereNodeTest.MockCompiler() w = WhereNode(children=[NothingNode()], connector='OR') with self.assertRaises(EmptyResultSet): w.as_sql(compiler, connection) w.negate() self.assertEqual(w.as_sql(compiler, connection), ('', [])) w = WhereNode(children=[self.DummyNode(), self.DummyNode()], connector='OR') self.assertEqual(w.as_sql(compiler, connection), ('(dummy OR dummy)', [])) w.negate() self.assertEqual(w.as_sql(compiler, connection), ('NOT (dummy OR dummy)', [])) w = WhereNode(children=[NothingNode(), self.DummyNode()], connector='OR') self.assertEqual(w.as_sql(compiler, connection), ('dummy', [])) w.negate() self.assertEqual(w.as_sql(compiler, connection), ('NOT (dummy)', [])) def test_empty_nodes(self): compiler = WhereNodeTest.MockCompiler() empty_w = WhereNode() w = WhereNode(children=[empty_w, empty_w]) self.assertEqual(w.as_sql(compiler, connection), ('', [])) w.negate() with self.assertRaises(EmptyResultSet): w.as_sql(compiler, connection) w.connector = 'OR' with self.assertRaises(EmptyResultSet): w.as_sql(compiler, connection) w.negate() self.assertEqual(w.as_sql(compiler, connection), ('', [])) w = WhereNode(children=[empty_w, NothingNode()], connector='OR') self.assertEqual(w.as_sql(compiler, connection), ('', [])) w = WhereNode(children=[empty_w, NothingNode()], connector='AND') with self.assertRaises(EmptyResultSet): w.as_sql(compiler, connection) class QuerySetExceptionTests(SimpleTestCase): def test_iter_exceptions(self): qs = ExtraInfo.objects.only('author') msg = "'ManyToOneRel' object has no attribute 'attname'" with self.assertRaisesMessage(AttributeError, msg): list(qs) def test_invalid_qs_list(self): # Test for #19895 - second iteration over invalid queryset # raises errors. qs = Article.objects.order_by('invalid_column') msg = "Cannot resolve keyword 'invalid_column' into field." with self.assertRaisesMessage(FieldError, msg): list(qs) with self.assertRaisesMessage(FieldError, msg): list(qs) def test_invalid_order_by(self): msg = "Invalid order_by arguments: ['*']" with self.assertRaisesMessage(FieldError, msg): list(Article.objects.order_by('*')) def test_invalid_queryset_model(self): msg = 'Cannot use QuerySet for "Article": Use a QuerySet for "ExtraInfo".' 
        with self.assertRaisesMessage(ValueError, msg):
            list(Author.objects.filter(extra=Article.objects.all()))


class NullJoinPromotionOrTest(TestCase):
    @classmethod
    def setUpTestData(cls):
        cls.d1 = ModelD.objects.create(name='foo')
        d2 = ModelD.objects.create(name='bar')
        cls.a1 = ModelA.objects.create(name='a1', d=cls.d1)
        c = ModelC.objects.create(name='c')
        b = ModelB.objects.create(name='b', c=c)
        cls.a2 = ModelA.objects.create(name='a2', b=b, d=d2)

    def test_ticket_17886(self):
        # The first Q-object is generating the match, the rest of the filters
        # should not remove the match even if they do not match anything. The
        # problem here was that b__name generates a LOUTER JOIN, then
        # b__c__name generates join to c, which the ORM tried to promote but
        # failed as that join isn't nullable.
        q_obj = (
            Q(d__name='foo') |
            Q(b__name='foo') |
            Q(b__c__name='foo')
        )
        qset = ModelA.objects.filter(q_obj)
        self.assertEqual(list(qset), [self.a1])
        # We generate one INNER JOIN to D. The join is direct and not nullable
        # so we can use INNER JOIN for it. However, we can NOT use INNER JOIN
        # for the b->c join, as a->b is nullable.
        self.assertEqual(str(qset.query).count('INNER JOIN'), 1)

    def test_isnull_filter_promotion(self):
        qs = ModelA.objects.filter(Q(b__name__isnull=True))
        self.assertEqual(str(qs.query).count('LEFT OUTER'), 1)
        self.assertEqual(list(qs), [self.a1])

        qs = ModelA.objects.filter(~Q(b__name__isnull=True))
        self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
        self.assertEqual(list(qs), [self.a2])

        qs = ModelA.objects.filter(~~Q(b__name__isnull=True))
        self.assertEqual(str(qs.query).count('LEFT OUTER'), 1)
        self.assertEqual(list(qs), [self.a1])

        qs = ModelA.objects.filter(Q(b__name__isnull=False))
        self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
        self.assertEqual(list(qs), [self.a2])

        qs = ModelA.objects.filter(~Q(b__name__isnull=False))
        self.assertEqual(str(qs.query).count('LEFT OUTER'), 1)
        self.assertEqual(list(qs), [self.a1])

        qs = ModelA.objects.filter(~~Q(b__name__isnull=False))
        self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
        self.assertEqual(list(qs), [self.a2])

    def test_null_join_demotion(self):
        qs = ModelA.objects.filter(Q(b__name__isnull=False) & Q(b__name__isnull=True))
        self.assertIn(' INNER JOIN ', str(qs.query))
        qs = ModelA.objects.filter(Q(b__name__isnull=True) & Q(b__name__isnull=False))
        self.assertIn(' INNER JOIN ', str(qs.query))
        qs = ModelA.objects.filter(Q(b__name__isnull=False) | Q(b__name__isnull=True))
        self.assertIn(' LEFT OUTER JOIN ', str(qs.query))
        qs = ModelA.objects.filter(Q(b__name__isnull=True) | Q(b__name__isnull=False))
        self.assertIn(' LEFT OUTER JOIN ', str(qs.query))

    def test_ticket_21366(self):
        n = Note.objects.create(note='n', misc='m')
        e = ExtraInfo.objects.create(info='info', note=n)
        a = Author.objects.create(name='Author1', num=1, extra=e)
        Ranking.objects.create(rank=1, author=a)
        r1 = Report.objects.create(name='Foo', creator=a)
        r2 = Report.objects.create(name='Bar')
        Report.objects.create(name='Bar', creator=a)
        qs = Report.objects.filter(
            Q(creator__ranking__isnull=True) |
            Q(creator__ranking__rank=1, name='Foo')
        )
        self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 2)
        self.assertEqual(str(qs.query).count(' JOIN '), 2)
        self.assertSequenceEqual(qs.order_by('name'), [r2, r1])

    def test_ticket_21748(self):
        i1 = Identifier.objects.create(name='i1')
        i2 = Identifier.objects.create(name='i2')
        i3 = Identifier.objects.create(name='i3')
        Program.objects.create(identifier=i1)
        Channel.objects.create(identifier=i1)
        Program.objects.create(identifier=i2)
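        # (Data sketch: i1 has both a program and a channel, i2 only a
        # program, and i3 has neither.)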
        self.assertSequenceEqual(Identifier.objects.filter(program=None, channel=None), [i3])
        self.assertSequenceEqual(Identifier.objects.exclude(program=None, channel=None).order_by('name'), [i1, i2])

    def test_ticket_21748_double_negated_and(self):
        i1 = Identifier.objects.create(name='i1')
        i2 = Identifier.objects.create(name='i2')
        Identifier.objects.create(name='i3')
        p1 = Program.objects.create(identifier=i1)
        c1 = Channel.objects.create(identifier=i1)
        Program.objects.create(identifier=i2)
        # Check that ~~Q() (or equivalently .exclude(~Q)) works like Q() for
        # join promotion.
        qs1_doubleneg = Identifier.objects.exclude(~Q(program__id=p1.id, channel__id=c1.id)).order_by('pk')
        qs1_filter = Identifier.objects.filter(program__id=p1.id, channel__id=c1.id).order_by('pk')
        self.assertQuerysetEqual(qs1_doubleneg, qs1_filter, lambda x: x)
        self.assertEqual(str(qs1_filter.query).count('JOIN'), str(qs1_doubleneg.query).count('JOIN'))
        self.assertEqual(2, str(qs1_doubleneg.query).count('INNER JOIN'))
        self.assertEqual(str(qs1_filter.query).count('INNER JOIN'), str(qs1_doubleneg.query).count('INNER JOIN'))

    def test_ticket_21748_double_negated_or(self):
        i1 = Identifier.objects.create(name='i1')
        i2 = Identifier.objects.create(name='i2')
        Identifier.objects.create(name='i3')
        p1 = Program.objects.create(identifier=i1)
        c1 = Channel.objects.create(identifier=i1)
        p2 = Program.objects.create(identifier=i2)
        # Test OR + doubleneg. The expected result is that channel is LOUTER
        # joined, program INNER joined
        qs1_filter = Identifier.objects.filter(
            Q(program__id=p2.id, channel__id=c1.id) | Q(program__id=p1.id)
        ).order_by('pk')
        qs1_doubleneg = Identifier.objects.exclude(
            ~Q(Q(program__id=p2.id, channel__id=c1.id) | Q(program__id=p1.id))
        ).order_by('pk')
        self.assertQuerysetEqual(qs1_doubleneg, qs1_filter, lambda x: x)
        self.assertEqual(str(qs1_filter.query).count('JOIN'), str(qs1_doubleneg.query).count('JOIN'))
        self.assertEqual(1, str(qs1_doubleneg.query).count('INNER JOIN'))
        self.assertEqual(str(qs1_filter.query).count('INNER JOIN'), str(qs1_doubleneg.query).count('INNER JOIN'))

    def test_ticket_21748_complex_filter(self):
        i1 = Identifier.objects.create(name='i1')
        i2 = Identifier.objects.create(name='i2')
        Identifier.objects.create(name='i3')
        p1 = Program.objects.create(identifier=i1)
        c1 = Channel.objects.create(identifier=i1)
        p2 = Program.objects.create(identifier=i2)
        # Finally, a more complex case, one time in a way where each
        # NOT is pushed to lowest level in the boolean tree, and
        # another query where this isn't done.
        qs1 = Identifier.objects.filter(
            ~Q(~Q(program__id=p2.id, channel__id=c1.id) & Q(program__id=p1.id))
        ).order_by('pk')
        qs2 = Identifier.objects.filter(
            Q(Q(program__id=p2.id, channel__id=c1.id) | ~Q(program__id=p1.id))
        ).order_by('pk')
        self.assertQuerysetEqual(qs1, qs2, lambda x: x)
        self.assertEqual(str(qs1.query).count('JOIN'), str(qs2.query).count('JOIN'))
        self.assertEqual(0, str(qs1.query).count('INNER JOIN'))
        self.assertEqual(str(qs1.query).count('INNER JOIN'), str(qs2.query).count('INNER JOIN'))


class ReverseJoinTrimmingTest(TestCase):
    def test_reverse_trimming(self):
        # We don't accidentally trim reverse joins - we can't know if there is
        # anything on the other side of the join, so trimming reverse joins
        # can't be done, ever.
        t = Tag.objects.create()
        qs = Tag.objects.filter(annotation__tag=t.pk)
        self.assertIn('INNER JOIN', str(qs.query))
        self.assertEqual(list(qs), [])


class JoinReuseTest(TestCase):
    """
    The queries reuse joins sensibly (for example, direct joins are
    always reused).
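
    Each test counts JOIN clauses in the generated SQL to check that a join
    is (or deliberately is not) reused.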
""" def test_fk_reuse(self): qs = Annotation.objects.filter(tag__name='foo').filter(tag__name='bar') self.assertEqual(str(qs.query).count('JOIN'), 1) def test_fk_reuse_select_related(self): qs = Annotation.objects.filter(tag__name='foo').select_related('tag') self.assertEqual(str(qs.query).count('JOIN'), 1) def test_fk_reuse_annotation(self): qs = Annotation.objects.filter(tag__name='foo').annotate(cnt=Count('tag__name')) self.assertEqual(str(qs.query).count('JOIN'), 1) def test_fk_reuse_disjunction(self): qs = Annotation.objects.filter(Q(tag__name='foo') | Q(tag__name='bar')) self.assertEqual(str(qs.query).count('JOIN'), 1) def test_fk_reuse_order_by(self): qs = Annotation.objects.filter(tag__name='foo').order_by('tag__name') self.assertEqual(str(qs.query).count('JOIN'), 1) def test_revo2o_reuse(self): qs = Detail.objects.filter(member__name='foo').filter(member__name='foo') self.assertEqual(str(qs.query).count('JOIN'), 1) def test_revfk_noreuse(self): qs = Author.objects.filter(report__name='r4').filter(report__name='r1') self.assertEqual(str(qs.query).count('JOIN'), 2) def test_inverted_q_across_relations(self): """ When a trimmable join is specified in the query (here school__), the ORM detects it and removes unnecessary joins. The set of reusable joins are updated after trimming the query so that other lookups don't consider that the outer query's filters are in effect for the subquery (#26551). """ springfield_elementary = School.objects.create() hogward = School.objects.create() Student.objects.create(school=springfield_elementary) hp = Student.objects.create(school=hogward) Classroom.objects.create(school=hogward, name='Potion') Classroom.objects.create(school=springfield_elementary, name='Main') qs = Student.objects.filter( ~(Q(school__classroom__name='Main') & Q(school__classroom__has_blackboard=None)) ) self.assertSequenceEqual(qs, [hp]) class DisjunctionPromotionTests(TestCase): def test_disjunction_promotion_select_related(self): fk1 = FK1.objects.create(f1='f1', f2='f2') basea = BaseA.objects.create(a=fk1) qs = BaseA.objects.filter(Q(a=fk1) | Q(b=2)) self.assertEqual(str(qs.query).count(' JOIN '), 0) qs = qs.select_related('a', 'b') self.assertEqual(str(qs.query).count(' INNER JOIN '), 0) self.assertEqual(str(qs.query).count(' LEFT OUTER JOIN '), 2) with self.assertNumQueries(1): self.assertSequenceEqual(qs, [basea]) self.assertEqual(qs[0].a, fk1) self.assertIs(qs[0].b, None) def test_disjunction_promotion1(self): # Pre-existing join, add two ORed filters to the same join, # all joins can be INNER JOINS. qs = BaseA.objects.filter(a__f1='foo') self.assertEqual(str(qs.query).count('INNER JOIN'), 1) qs = qs.filter(Q(b__f1='foo') | Q(b__f2='foo')) self.assertEqual(str(qs.query).count('INNER JOIN'), 2) # Reverse the order of AND and OR filters. qs = BaseA.objects.filter(Q(b__f1='foo') | Q(b__f2='foo')) self.assertEqual(str(qs.query).count('INNER JOIN'), 1) qs = qs.filter(a__f1='foo') self.assertEqual(str(qs.query).count('INNER JOIN'), 2) def test_disjunction_promotion2(self): qs = BaseA.objects.filter(a__f1='foo') self.assertEqual(str(qs.query).count('INNER JOIN'), 1) # Now we have two different joins in an ORed condition, these # must be OUTER joins. The pre-existing join should remain INNER. qs = qs.filter(Q(b__f1='foo') | Q(c__f2='foo')) self.assertEqual(str(qs.query).count('INNER JOIN'), 1) self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 2) # Reverse case. 
        qs = BaseA.objects.filter(Q(b__f1='foo') | Q(c__f2='foo'))
        self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 2)
        qs = qs.filter(a__f1='foo')
        self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
        self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 2)

    def test_disjunction_promotion3(self):
        qs = BaseA.objects.filter(a__f2='bar')
        self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
        # The ANDed a__f2 filter allows us to keep using INNER JOIN
        # even inside the ORed case. If the join to a__ returns nothing,
        # the ANDed filter for a__f2 can't be true.
        qs = qs.filter(Q(a__f1='foo') | Q(b__f2='foo'))
        self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
        self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 1)

    def test_disjunction_promotion3_demote(self):
        # This one needs demotion logic: the first filter causes a to be
        # outer joined, the second filter makes it inner join again.
        qs = BaseA.objects.filter(
            Q(a__f1='foo') | Q(b__f2='foo')).filter(a__f2='bar')
        self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
        self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 1)

    def test_disjunction_promotion4_demote(self):
        qs = BaseA.objects.filter(Q(a=1) | Q(a=2))
        self.assertEqual(str(qs.query).count('JOIN'), 0)
        # Demote needed for the "a" join. It is marked as outer join by
        # above filter (even if it is trimmed away).
        qs = qs.filter(a__f1='foo')
        self.assertEqual(str(qs.query).count('INNER JOIN'), 1)

    def test_disjunction_promotion4(self):
        qs = BaseA.objects.filter(a__f1='foo')
        self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
        qs = qs.filter(Q(a=1) | Q(a=2))
        self.assertEqual(str(qs.query).count('INNER JOIN'), 1)

    def test_disjunction_promotion5_demote(self):
        qs = BaseA.objects.filter(Q(a=1) | Q(a=2))
        # Note that the above filters on a force the join to an
        # inner join even if it is trimmed.
        self.assertEqual(str(qs.query).count('JOIN'), 0)
        qs = qs.filter(Q(a__f1='foo') | Q(b__f1='foo'))
        # So, now the a__f1 join doesn't need promotion.
        self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
        # But b__f1 does.
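        # (A row can satisfy the disjunction via a__f1 alone, so an INNER
        # JOIN to b would wrongly drop rows that have no b at all.)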
        self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 1)
        qs = BaseA.objects.filter(Q(a__f1='foo') | Q(b__f1='foo'))
        # Now the join to a is created as LOUTER
        self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 2)
        qs = qs.filter(Q(a=1) | Q(a=2))
        self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
        self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 1)

    def test_disjunction_promotion6(self):
        qs = BaseA.objects.filter(Q(a=1) | Q(a=2))
        self.assertEqual(str(qs.query).count('JOIN'), 0)
        qs = BaseA.objects.filter(Q(a__f1='foo') & Q(b__f1='foo'))
        self.assertEqual(str(qs.query).count('INNER JOIN'), 2)
        self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 0)

        qs = BaseA.objects.filter(Q(a__f1='foo') & Q(b__f1='foo'))
        self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 0)
        self.assertEqual(str(qs.query).count('INNER JOIN'), 2)
        qs = qs.filter(Q(a=1) | Q(a=2))
        self.assertEqual(str(qs.query).count('INNER JOIN'), 2)
        self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 0)

    def test_disjunction_promotion7(self):
        qs = BaseA.objects.filter(Q(a=1) | Q(a=2))
        self.assertEqual(str(qs.query).count('JOIN'), 0)
        qs = BaseA.objects.filter(Q(a__f1='foo') | (Q(b__f1='foo') & Q(a__f1='bar')))
        self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
        self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 1)
        qs = BaseA.objects.filter(
            (Q(a__f1='foo') | Q(b__f1='foo')) & (Q(a__f1='bar') | Q(c__f1='foo'))
        )
        self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 3)
        self.assertEqual(str(qs.query).count('INNER JOIN'), 0)
        qs = BaseA.objects.filter(
            (Q(a__f1='foo') | (Q(a__f1='bar')) & (Q(b__f1='bar') | Q(c__f1='foo')))
        )
        self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 2)
        self.assertEqual(str(qs.query).count('INNER JOIN'), 1)

    def test_disjunction_promotion_fexpression(self):
        qs = BaseA.objects.filter(Q(a__f1=F('b__f1')) | Q(b__f1='foo'))
        self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 1)
        self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
        qs = BaseA.objects.filter(Q(a__f1=F('c__f1')) | Q(b__f1='foo'))
        self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 3)
        qs = BaseA.objects.filter(Q(a__f1=F('b__f1')) | Q(a__f2=F('b__f2')) | Q(c__f1='foo'))
        self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 3)
        qs = BaseA.objects.filter(Q(a__f1=F('c__f1')) | (Q(pk=1) & Q(pk=2)))
        self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 2)
        self.assertEqual(str(qs.query).count('INNER JOIN'), 0)


class ManyToManyExcludeTest(TestCase):
    def test_exclude_many_to_many(self):
        Identifier.objects.create(name='extra')
        program = Program.objects.create(identifier=Identifier.objects.create(name='program'))
        channel = Channel.objects.create(identifier=Identifier.objects.create(name='channel'))
        channel.programs.add(program)

        # channel contains 'program', so all Identifiers except that one
        # should be returned
        self.assertQuerysetEqual(
            Identifier.objects.exclude(program__channel=channel).order_by('name'),
            ['<Identifier: channel>', '<Identifier: extra>']
        )
        self.assertQuerysetEqual(
            Identifier.objects.exclude(program__channel=None).order_by('name'),
            ['<Identifier: program>']
        )

    def test_ticket_12823(self):
        pg3 = Page.objects.create(text='pg3')
        pg2 = Page.objects.create(text='pg2')
        pg1 = Page.objects.create(text='pg1')
        pa1 = Paragraph.objects.create(text='pa1')
        pa1.page.set([pg1, pg2])
        pa2 = Paragraph.objects.create(text='pa2')
        pa2.page.set([pg2, pg3])
        pa3 = Paragraph.objects.create(text='pa3')
        ch1 = Chapter.objects.create(title='ch1', paragraph=pa1)
        ch2 = Chapter.objects.create(title='ch2', paragraph=pa2)
        ch3 = Chapter.objects.create(title='ch3', paragraph=pa3)
        b1 = Book.objects.create(title='b1', chapter=ch1)
        b2 = Book.objects.create(title='b2', chapter=ch2)
        b3 = Book.objects.create(title='b3', chapter=ch3)
        q = Book.objects.exclude(chapter__paragraph__page__text='pg1')
        self.assertNotIn('IS NOT NULL', str(q.query))
        self.assertEqual(len(q), 2)
        self.assertNotIn(b1, q)
        self.assertIn(b2, q)
        self.assertIn(b3, q)


class RelabelCloneTest(TestCase):
    def test_ticket_19964(self):
        my1 = MyObject.objects.create(data='foo')
        my1.parent = my1
        my1.save()
        my2 = MyObject.objects.create(data='bar', parent=my1)
        parents = MyObject.objects.filter(parent=F('id'))
        children = MyObject.objects.filter(parent__in=parents).exclude(parent=F('id'))
        self.assertEqual(list(parents), [my1])
        # Evaluating the children query (which has parents as part of it) does
        # not change results for the parents query.
        self.assertEqual(list(children), [my2])
        self.assertEqual(list(parents), [my1])


class Ticket20101Tests(TestCase):
    def test_ticket_20101(self):
        """
        Tests QuerySet ORed combining in exclude subquery case.
        """
        t = Tag.objects.create(name='foo')
        a1 = Annotation.objects.create(tag=t, name='a1')
        a2 = Annotation.objects.create(tag=t, name='a2')
        a3 = Annotation.objects.create(tag=t, name='a3')
        n = Note.objects.create(note='foo', misc='bar')
        qs1 = Note.objects.exclude(annotation__in=[a1, a2])
        qs2 = Note.objects.filter(annotation__in=[a3])
        self.assertIn(n, qs1)
        self.assertNotIn(n, qs2)
        self.assertIn(n, (qs1 | qs2))


class EmptyStringPromotionTests(SimpleTestCase):
    def test_empty_string_promotion(self):
        qs = RelatedObject.objects.filter(single__name='')
        if connection.features.interprets_empty_strings_as_nulls:
            self.assertIn('LEFT OUTER JOIN', str(qs.query))
        else:
            self.assertNotIn('LEFT OUTER JOIN', str(qs.query))


class ValuesSubqueryTests(TestCase):
    def test_values_in_subquery(self):
        # If a values() queryset is used, then the given values
        # will be used instead of forcing use of the relation's field.
        o1 = Order.objects.create(id=-2)
        o2 = Order.objects.create(id=-1)
        oi1 = OrderItem.objects.create(order=o1, status=0)
        oi1.status = oi1.pk
        oi1.save()
        OrderItem.objects.create(order=o2, status=0)
        # The query below should match o1 as it has related order_item
        # with id == status.
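        # (values_list('status') makes the subquery select the raw status
        # column rather than OrderItem's pk, so the outer __in compares
        # Order.id against those status values.)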
        self.assertSequenceEqual(Order.objects.filter(items__in=OrderItem.objects.values_list('status')), [o1])


class DoubleInSubqueryTests(TestCase):
    def test_double_subquery_in(self):
        lfa1 = LeafA.objects.create(data='foo')
        lfa2 = LeafA.objects.create(data='bar')
        lfb1 = LeafB.objects.create(data='lfb1')
        lfb2 = LeafB.objects.create(data='lfb2')
        Join.objects.create(a=lfa1, b=lfb1)
        Join.objects.create(a=lfa2, b=lfb2)
        leaf_as = LeafA.objects.filter(data='foo').values_list('pk', flat=True)
        joins = Join.objects.filter(a__in=leaf_as).values_list('b__id', flat=True)
        qs = LeafB.objects.filter(pk__in=joins)
        self.assertSequenceEqual(qs, [lfb1])


class Ticket18785Tests(SimpleTestCase):
    def test_ticket_18785(self):
        # Test join trimming from ticket18785
        qs = Item.objects.exclude(
            note__isnull=False
        ).filter(
            name='something', creator__extra__isnull=True
        ).order_by()
        self.assertEqual(1, str(qs.query).count('INNER JOIN'))
        self.assertEqual(0, str(qs.query).count('OUTER JOIN'))


class Ticket20788Tests(TestCase):
    def test_ticket_20788(self):
        Paragraph.objects.create()
        paragraph = Paragraph.objects.create()
        page = paragraph.page.create()
        chapter = Chapter.objects.create(paragraph=paragraph)
        Book.objects.create(chapter=chapter)

        paragraph2 = Paragraph.objects.create()
        Page.objects.create()
        chapter2 = Chapter.objects.create(paragraph=paragraph2)
        book2 = Book.objects.create(chapter=chapter2)

        sentences_not_in_pub = Book.objects.exclude(chapter__paragraph__page=page)
        self.assertSequenceEqual(sentences_not_in_pub, [book2])


class Ticket12807Tests(TestCase):
    def test_ticket_12807(self):
        p1 = Paragraph.objects.create()
        p2 = Paragraph.objects.create()
        # The ORed condition below should have no effect on the query - the
        # ~Q(pk__in=[]) will always be True.
        qs = Paragraph.objects.filter((Q(pk=p2.pk) | ~Q(pk__in=[])) & Q(pk=p1.pk))
        self.assertSequenceEqual(qs, [p1])


class RelatedLookupTypeTests(TestCase):
    error = 'Cannot query "%s": Must be "%s" instance.'

    @classmethod
    def setUpTestData(cls):
        cls.oa = ObjectA.objects.create(name="oa")
        cls.poa = ProxyObjectA.objects.get(name="oa")
        cls.coa = ChildObjectA.objects.create(name="coa")
        cls.wrong_type = Order.objects.create(id=cls.oa.pk)
        cls.ob = ObjectB.objects.create(name="ob", objecta=cls.oa, num=1)
        ProxyObjectB.objects.create(name="pob", objecta=cls.oa, num=2)
        cls.pob = ProxyObjectB.objects.all()
        ObjectC.objects.create(childobjecta=cls.coa)

    def test_wrong_type_lookup(self):
        """
        A ValueError is raised when the incorrect object type is passed to a
        query lookup.
        """
        # Passing incorrect object type
        with self.assertRaisesMessage(ValueError, self.error % (self.wrong_type, ObjectA._meta.object_name)):
            ObjectB.objects.get(objecta=self.wrong_type)

        with self.assertRaisesMessage(ValueError, self.error % (self.wrong_type, ObjectA._meta.object_name)):
            ObjectB.objects.filter(objecta__in=[self.wrong_type])

        with self.assertRaisesMessage(ValueError, self.error % (self.wrong_type, ObjectA._meta.object_name)):
            ObjectB.objects.filter(objecta=self.wrong_type)

        with self.assertRaisesMessage(ValueError, self.error % (self.wrong_type, ObjectB._meta.object_name)):
            ObjectA.objects.filter(objectb__in=[self.wrong_type, self.ob])

        # Passing an object of the class on which query is done.
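        # (self.ob is an ObjectB, the queried model itself, yet it is still
        # the wrong type for an objecta-based lookup.)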
        with self.assertRaisesMessage(ValueError, self.error % (self.ob, ObjectA._meta.object_name)):
            ObjectB.objects.filter(objecta__in=[self.poa, self.ob])

        with self.assertRaisesMessage(ValueError, self.error % (self.ob, ChildObjectA._meta.object_name)):
            ObjectC.objects.exclude(childobjecta__in=[self.coa, self.ob])

    def test_wrong_backward_lookup(self):
        """
        A ValueError is raised when the incorrect object type is passed to a
        query lookup for backward relations.
        """
        with self.assertRaisesMessage(ValueError, self.error % (self.oa, ObjectB._meta.object_name)):
            ObjectA.objects.filter(objectb__in=[self.oa, self.ob])

        with self.assertRaisesMessage(ValueError, self.error % (self.oa, ObjectB._meta.object_name)):
            ObjectA.objects.exclude(objectb=self.oa)

        with self.assertRaisesMessage(ValueError, self.error % (self.wrong_type, ObjectB._meta.object_name)):
            ObjectA.objects.get(objectb=self.wrong_type)

    def test_correct_lookup(self):
        """
        When passing proxy model objects, child objects, or parent objects,
        lookups work fine.
        """
        out_a = ['<ObjectA: oa>']
        out_b = ['<ObjectB: ob>', '<ObjectB: pob>']
        out_c = ['<ObjectC: >']

        # proxy model objects
        self.assertQuerysetEqual(ObjectB.objects.filter(objecta=self.poa).order_by('name'), out_b)
        self.assertQuerysetEqual(ObjectA.objects.filter(objectb__in=self.pob).order_by('pk'), out_a * 2)

        # child objects
        self.assertQuerysetEqual(ObjectB.objects.filter(objecta__in=[self.coa]), [])
        self.assertQuerysetEqual(ObjectB.objects.filter(objecta__in=[self.poa, self.coa]).order_by('name'), out_b)
        self.assertQuerysetEqual(
            ObjectB.objects.filter(objecta__in=iter([self.poa, self.coa])).order_by('name'),
            out_b
        )

        # parent objects
        self.assertQuerysetEqual(ObjectC.objects.exclude(childobjecta=self.oa), out_c)

        # QuerySet related object type checking shouldn't issue queries
        # (the querysets aren't evaluated here, hence zero queries) (#23266).
        with self.assertNumQueries(0):
            ObjectB.objects.filter(objecta__in=ObjectA.objects.all())

    def test_values_queryset_lookup(self):
        """
        #23396 - Ensure values() querysets are not checked for compatibility
        with the lookup field.
        """
        # Make sure the num and objecta field values match.
        ob = ObjectB.objects.get(name='ob')
        ob.num = ob.objecta.pk
        ob.save()
        pob = ObjectB.objects.get(name='pob')
        pob.num = pob.objecta.pk
        pob.save()
        self.assertQuerysetEqual(ObjectB.objects.filter(
            objecta__in=ObjectB.objects.all().values_list('num')
        ).order_by('pk'), ['<ObjectB: ob>', '<ObjectB: pob>'])


class Ticket14056Tests(TestCase):
    def test_ticket_14056(self):
        s1 = SharedConnection.objects.create(data='s1')
        s2 = SharedConnection.objects.create(data='s2')
        s3 = SharedConnection.objects.create(data='s3')
        PointerA.objects.create(connection=s2)
        expected_ordering = (
            [s1, s3, s2] if connection.features.nulls_order_largest
            else [s2, s1, s3]
        )
        self.assertSequenceEqual(SharedConnection.objects.order_by('-pointera__connection', 'pk'), expected_ordering)


class Ticket20955Tests(TestCase):
    def test_ticket_20955(self):
        jack = Staff.objects.create(name='jackstaff')
        jackstaff = StaffUser.objects.create(staff=jack)
        jill = Staff.objects.create(name='jillstaff')
        jillstaff = StaffUser.objects.create(staff=jill)
        task = Task.objects.create(creator=jackstaff, owner=jillstaff, title="task")
        task_get = Task.objects.get(pk=task.pk)
        # Load data so that assertNumQueries doesn't complain about the get
        # version's queries.
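        # (Plain attribute access below triggers the lazy related-object
        # queries up front and caches the results on task_get.)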
task_get.creator.staffuser.staff task_get.owner.staffuser.staff qs = Task.objects.select_related( 'creator__staffuser__staff', 'owner__staffuser__staff') self.assertEqual(str(qs.query).count(' JOIN '), 6) task_select_related = qs.get(pk=task.pk) with self.assertNumQueries(0): self.assertEqual(task_select_related.creator.staffuser.staff, task_get.creator.staffuser.staff) self.assertEqual(task_select_related.owner.staffuser.staff, task_get.owner.staffuser.staff) class Ticket21203Tests(TestCase): def test_ticket_21203(self): p = Ticket21203Parent.objects.create(parent_bool=True) c = Ticket21203Child.objects.create(parent=p) qs = Ticket21203Child.objects.select_related('parent').defer('parent__created') self.assertSequenceEqual(qs, [c]) self.assertIs(qs[0].parent.parent_bool, True) class ValuesJoinPromotionTests(TestCase): def test_values_no_promotion_for_existing(self): qs = Node.objects.filter(parent__parent__isnull=False) self.assertIn(' INNER JOIN ', str(qs.query)) qs = qs.values('parent__parent__id') self.assertIn(' INNER JOIN ', str(qs.query)) # Make sure there is a left outer join without the filter. qs = Node.objects.values('parent__parent__id') self.assertIn(' LEFT OUTER JOIN ', str(qs.query)) def test_non_nullable_fk_not_promoted(self): qs = ObjectB.objects.values('objecta__name') self.assertIn(' INNER JOIN ', str(qs.query)) def test_ticket_21376(self): a = ObjectA.objects.create() ObjectC.objects.create(objecta=a) qs = ObjectC.objects.filter( Q(objecta=a) | Q(objectb__objecta=a), ) qs = qs.filter( Q(objectb=1) | Q(objecta=a), ) self.assertEqual(qs.count(), 1) tblname = connection.ops.quote_name(ObjectB._meta.db_table) self.assertIn(' LEFT OUTER JOIN %s' % tblname, str(qs.query)) class ForeignKeyToBaseExcludeTests(TestCase): def test_ticket_21787(self): sc1 = SpecialCategory.objects.create(special_name='sc1', name='sc1') sc2 = SpecialCategory.objects.create(special_name='sc2', name='sc2') sc3 = SpecialCategory.objects.create(special_name='sc3', name='sc3') c1 = CategoryItem.objects.create(category=sc1) CategoryItem.objects.create(category=sc2) self.assertSequenceEqual(SpecialCategory.objects.exclude(categoryitem__id=c1.pk).order_by('name'), [sc2, sc3]) self.assertSequenceEqual(SpecialCategory.objects.filter(categoryitem__id=c1.pk), [sc1]) class ReverseM2MCustomPkTests(TestCase): def test_ticket_21879(self): cpt1 = CustomPkTag.objects.create(id='cpt1', tag='cpt1') cp1 = CustomPk.objects.create(name='cp1', extra='extra') cp1.custompktag_set.add(cpt1) self.assertSequenceEqual(CustomPk.objects.filter(custompktag=cpt1), [cp1]) self.assertSequenceEqual(CustomPkTag.objects.filter(custom_pk=cp1), [cpt1]) class Ticket22429Tests(TestCase): def test_ticket_22429(self): sc1 = School.objects.create() st1 = Student.objects.create(school=sc1) sc2 = School.objects.create() st2 = Student.objects.create(school=sc2) cr = Classroom.objects.create(school=sc1) cr.students.add(st1) queryset = Student.objects.filter(~Q(classroom__school=F('school'))) self.assertSequenceEqual(queryset, [st2]) class Ticket23605Tests(TestCase): def test_ticket_23605(self): # Test filtering on a complicated q-object from ticket's report. # The query structure is such that we have multiple nested subqueries. # The original problem was that the inner queries weren't relabeled # correctly. # See also #24090. 
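        # Only a1 gets a matching Ticket23605B row (satisfying both the
        # field_b0 comparison and field_b1=True); a2 has none, so the complex
        # condition below matches a1 and its negation matches a2.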
a1 = Ticket23605A.objects.create() a2 = Ticket23605A.objects.create() c1 = Ticket23605C.objects.create(field_c0=10000.0) Ticket23605B.objects.create( field_b0=10000.0, field_b1=True, modelc_fk=c1, modela_fk=a1) complex_q = Q(pk__in=Ticket23605A.objects.filter( Q( # True for a1 as field_b0 = 10000, field_c0=10000 # False for a2 as no ticket23605b found ticket23605b__field_b0__gte=1000000 / F("ticket23605b__modelc_fk__field_c0") ) & # True for a1 (field_b1=True) Q(ticket23605b__field_b1=True) & ~Q(ticket23605b__pk__in=Ticket23605B.objects.filter( ~( # Same filters as above commented filters, but # double-negated (one for Q() above, one for # parentheses). So, again a1 match, a2 not. Q(field_b1=True) & Q(field_b0__gte=1000000 / F("modelc_fk__field_c0")) ) ))).filter(ticket23605b__field_b1=True)) qs1 = Ticket23605A.objects.filter(complex_q) self.assertSequenceEqual(qs1, [a1]) qs2 = Ticket23605A.objects.exclude(complex_q) self.assertSequenceEqual(qs2, [a2]) class TestTicket24279(TestCase): def test_ticket_24278(self): School.objects.create() qs = School.objects.filter(Q(pk__in=()) | Q()) self.assertQuerysetEqual(qs, []) class TestInvalidValuesRelation(SimpleTestCase): def test_invalid_values(self): msg = "Field 'id' expected a number but got 'abc'." with self.assertRaisesMessage(ValueError, msg): Annotation.objects.filter(tag='abc') with self.assertRaisesMessage(ValueError, msg): Annotation.objects.filter(tag__in=[123, 'abc']) class TestTicket24605(TestCase): def test_ticket_24605(self): """ Subquery table names should be quoted. """ i1 = Individual.objects.create(alive=True) RelatedIndividual.objects.create(related=i1) i2 = Individual.objects.create(alive=False) RelatedIndividual.objects.create(related=i2) i3 = Individual.objects.create(alive=True) i4 = Individual.objects.create(alive=False) self.assertSequenceEqual(Individual.objects.filter(Q(alive=False), Q(related_individual__isnull=True)), [i4]) self.assertSequenceEqual( Individual.objects.exclude(Q(alive=False), Q(related_individual__isnull=True)).order_by('pk'), [i1, i2, i3] ) class Ticket23622Tests(TestCase): @skipUnlessDBFeature('can_distinct_on_fields') def test_ticket_23622(self): """ Make sure __pk__in and __in work the same for related fields when using a distinct on subquery. 
""" a1 = Ticket23605A.objects.create() a2 = Ticket23605A.objects.create() c1 = Ticket23605C.objects.create(field_c0=0.0) Ticket23605B.objects.create( modela_fk=a1, field_b0=123, field_b1=True, modelc_fk=c1, ) Ticket23605B.objects.create( modela_fk=a1, field_b0=23, field_b1=True, modelc_fk=c1, ) Ticket23605B.objects.create( modela_fk=a1, field_b0=234, field_b1=True, modelc_fk=c1, ) Ticket23605B.objects.create( modela_fk=a1, field_b0=12, field_b1=True, modelc_fk=c1, ) Ticket23605B.objects.create( modela_fk=a2, field_b0=567, field_b1=True, modelc_fk=c1, ) Ticket23605B.objects.create( modela_fk=a2, field_b0=76, field_b1=True, modelc_fk=c1, ) Ticket23605B.objects.create( modela_fk=a2, field_b0=7, field_b1=True, modelc_fk=c1, ) Ticket23605B.objects.create( modela_fk=a2, field_b0=56, field_b1=True, modelc_fk=c1, ) qx = ( Q(ticket23605b__pk__in=Ticket23605B.objects.order_by('modela_fk', '-field_b1').distinct('modela_fk')) & Q(ticket23605b__field_b0__gte=300) ) qy = ( Q(ticket23605b__in=Ticket23605B.objects.order_by('modela_fk', '-field_b1').distinct('modela_fk')) & Q(ticket23605b__field_b0__gte=300) ) self.assertEqual( set(Ticket23605A.objects.filter(qx).values_list('pk', flat=True)), set(Ticket23605A.objects.filter(qy).values_list('pk', flat=True)) ) self.assertSequenceEqual(Ticket23605A.objects.filter(qx), [a2])
""" Various complex queries that have been problematic in the past. """ import threading from django.db import models class DumbCategory(models.Model): pass class ProxyCategory(DumbCategory): class Meta: proxy = True class NamedCategory(DumbCategory): name = models.CharField(max_length=10) def __str__(self): return self.name class Tag(models.Model): name = models.CharField(max_length=10) parent = models.ForeignKey( 'self', models.SET_NULL, blank=True, null=True, related_name='children', ) category = models.ForeignKey(NamedCategory, models.SET_NULL, null=True, default=None) class Meta: ordering = ['name'] def __str__(self): return self.name class Note(models.Model): note = models.CharField(max_length=100) misc = models.CharField(max_length=10) tag = models.ForeignKey(Tag, models.SET_NULL, blank=True, null=True) class Meta: ordering = ['note'] def __str__(self): return self.note def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) # Regression for #13227 -- having an attribute that # is unpicklable doesn't stop you from cloning queries # that use objects of that type as an argument. self.lock = threading.Lock() class Annotation(models.Model): name = models.CharField(max_length=10) tag = models.ForeignKey(Tag, models.CASCADE) notes = models.ManyToManyField(Note) def __str__(self): return self.name class DateTimePK(models.Model): date = models.DateTimeField(primary_key=True, auto_now_add=True) class ExtraInfo(models.Model): info = models.CharField(max_length=100) note = models.ForeignKey(Note, models.CASCADE, null=True) value = models.IntegerField(null=True) date = models.ForeignKey(DateTimePK, models.SET_NULL, null=True) class Meta: ordering = ['info'] def __str__(self): return self.info class Author(models.Model): name = models.CharField(max_length=10) num = models.IntegerField(unique=True) extra = models.ForeignKey(ExtraInfo, models.CASCADE) class Meta: ordering = ['name'] def __str__(self): return self.name class Item(models.Model): name = models.CharField(max_length=10) created = models.DateTimeField() modified = models.DateTimeField(blank=True, null=True) tags = models.ManyToManyField(Tag, blank=True) creator = models.ForeignKey(Author, models.CASCADE) note = models.ForeignKey(Note, models.CASCADE) class Meta: ordering = ['-note', 'name'] def __str__(self): return self.name class Report(models.Model): name = models.CharField(max_length=10) creator = models.ForeignKey(Author, models.SET_NULL, to_field='num', null=True) def __str__(self): return self.name class ReportComment(models.Model): report = models.ForeignKey(Report, models.CASCADE) class Ranking(models.Model): rank = models.IntegerField() author = models.ForeignKey(Author, models.CASCADE) class Meta: # A complex ordering specification. Should stress the system a bit. ordering = ('author__extra__note', 'author__name', 'rank') def __str__(self): return '%d: %s' % (self.rank, self.author.name) class Cover(models.Model): title = models.CharField(max_length=50) item = models.ForeignKey(Item, models.CASCADE) class Meta: ordering = ['item'] def __str__(self): return self.title class Number(models.Model): num = models.IntegerField() other_num = models.IntegerField(null=True) def __str__(self): return str(self.num) # Symmetrical m2m field with a normal field using the reverse accessor name # ("valid"). class Valid(models.Model): valid = models.CharField(max_length=10) parent = models.ManyToManyField('self') class Meta: ordering = ['valid'] # Some funky cross-linked models for testing a couple of infinite recursion # cases. 
class X(models.Model): y = models.ForeignKey('Y', models.CASCADE) class Y(models.Model): x1 = models.ForeignKey(X, models.CASCADE, related_name='y1') # Some models with a cycle in the default ordering. This would be bad if we # didn't catch the infinite loop. class LoopX(models.Model): y = models.ForeignKey('LoopY', models.CASCADE) class Meta: ordering = ['y'] class LoopY(models.Model): x = models.ForeignKey(LoopX, models.CASCADE) class Meta: ordering = ['x'] class LoopZ(models.Model): z = models.ForeignKey('self', models.CASCADE) class Meta: ordering = ['z'] # A model and custom default manager combination. class CustomManager(models.Manager): def get_queryset(self): qs = super().get_queryset() return qs.filter(public=True, tag__name='t1') class ManagedModel(models.Model): data = models.CharField(max_length=10) tag = models.ForeignKey(Tag, models.CASCADE) public = models.BooleanField(default=True) objects = CustomManager() normal_manager = models.Manager() def __str__(self): return self.data # An inter-related setup with multiple paths from Child to Detail. class Detail(models.Model): data = models.CharField(max_length=10) class MemberManager(models.Manager): def get_queryset(self): return super().get_queryset().select_related("details") class Member(models.Model): name = models.CharField(max_length=10) details = models.OneToOneField(Detail, models.CASCADE, primary_key=True) objects = MemberManager() class Child(models.Model): person = models.OneToOneField(Member, models.CASCADE, primary_key=True) parent = models.ForeignKey(Member, models.CASCADE, related_name="children") # Custom primary keys interfered with ordering in the past. class CustomPk(models.Model): name = models.CharField(max_length=10, primary_key=True) extra = models.CharField(max_length=10) class Meta: ordering = ['name', 'extra'] class Related(models.Model): custom = models.ForeignKey(CustomPk, models.CASCADE, null=True) class CustomPkTag(models.Model): id = models.CharField(max_length=20, primary_key=True) custom_pk = models.ManyToManyField(CustomPk) tag = models.CharField(max_length=20) # An inter-related setup with a model subclass that has a nullable # path to another model, and a return path from that model. class Celebrity(models.Model): name = models.CharField("Name", max_length=20) greatest_fan = models.ForeignKey("Fan", models.SET_NULL, null=True, unique=True) def __str__(self): return self.name class TvChef(Celebrity): pass class Fan(models.Model): fan_of = models.ForeignKey(Celebrity, models.CASCADE) # Multiple foreign keys class LeafA(models.Model): data = models.CharField(max_length=10) def __str__(self): return self.data class LeafB(models.Model): data = models.CharField(max_length=10) class Join(models.Model): a = models.ForeignKey(LeafA, models.CASCADE) b = models.ForeignKey(LeafB, models.CASCADE) class ReservedName(models.Model): name = models.CharField(max_length=20) order = models.IntegerField() def __str__(self): return self.name # A simpler shared-foreign-key setup that can expose some problems. 
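# (Both pointers reference SharedConnection; ordering SharedConnection by the
# reverse pointera__connection relation yields NULLs for unmatched rows,
# which is what Ticket14056Tests exercises.)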
class SharedConnection(models.Model): data = models.CharField(max_length=10) def __str__(self): return self.data class PointerA(models.Model): connection = models.ForeignKey(SharedConnection, models.CASCADE) class PointerB(models.Model): connection = models.ForeignKey(SharedConnection, models.CASCADE) # Multi-layer ordering class SingleObject(models.Model): name = models.CharField(max_length=10) class Meta: ordering = ['name'] def __str__(self): return self.name class RelatedObject(models.Model): single = models.ForeignKey(SingleObject, models.SET_NULL, null=True) f = models.IntegerField(null=True) class Meta: ordering = ['single'] class Plaything(models.Model): name = models.CharField(max_length=10) others = models.ForeignKey(RelatedObject, models.SET_NULL, null=True) class Meta: ordering = ['others'] def __str__(self): return self.name class Article(models.Model): name = models.CharField(max_length=20) created = models.DateTimeField() def __str__(self): return self.name class Food(models.Model): name = models.CharField(max_length=20, unique=True) def __str__(self): return self.name class Eaten(models.Model): food = models.ForeignKey(Food, models.SET_NULL, to_field="name", null=True) meal = models.CharField(max_length=20) def __str__(self): return "%s at %s" % (self.food, self.meal) class Node(models.Model): num = models.IntegerField(unique=True) parent = models.ForeignKey("self", models.SET_NULL, to_field="num", null=True) def __str__(self): return "%s" % self.num # Bug #12252 class ObjectA(models.Model): name = models.CharField(max_length=50) def __str__(self): return self.name def __iter__(self): # Ticket #23721 assert False, 'type checking should happen without calling model __iter__' class ProxyObjectA(ObjectA): class Meta: proxy = True class ChildObjectA(ObjectA): pass class ObjectB(models.Model): name = models.CharField(max_length=50) objecta = models.ForeignKey(ObjectA, models.CASCADE) num = models.PositiveSmallIntegerField() def __str__(self): return self.name class ProxyObjectB(ObjectB): class Meta: proxy = True class ObjectC(models.Model): name = models.CharField(max_length=50) objecta = models.ForeignKey(ObjectA, models.SET_NULL, null=True) objectb = models.ForeignKey(ObjectB, models.SET_NULL, null=True) childobjecta = models.ForeignKey(ChildObjectA, models.SET_NULL, null=True, related_name='ca_pk') def __str__(self): return self.name class SimpleCategory(models.Model): name = models.CharField(max_length=15) def __str__(self): return self.name class SpecialCategory(SimpleCategory): special_name = models.CharField(max_length=15) def __str__(self): return self.name + " " + self.special_name class CategoryItem(models.Model): category = models.ForeignKey(SimpleCategory, models.CASCADE) def __str__(self): return "category item: " + str(self.category) class MixedCaseFieldCategoryItem(models.Model): CaTeGoRy = models.ForeignKey(SimpleCategory, models.CASCADE) class MixedCaseDbColumnCategoryItem(models.Model): category = models.ForeignKey(SimpleCategory, models.CASCADE, db_column='CaTeGoRy_Id') class OneToOneCategory(models.Model): new_name = models.CharField(max_length=15) category = models.OneToOneField(SimpleCategory, models.CASCADE) def __str__(self): return "one2one " + self.new_name class CategoryRelationship(models.Model): first = models.ForeignKey(SimpleCategory, models.CASCADE, related_name='first_rel') second = models.ForeignKey(SimpleCategory, models.CASCADE, related_name='second_rel') class CommonMixedCaseForeignKeys(models.Model): category = 
models.ForeignKey(CategoryItem, models.CASCADE) mixed_case_field_category = models.ForeignKey(MixedCaseFieldCategoryItem, models.CASCADE) mixed_case_db_column_category = models.ForeignKey(MixedCaseDbColumnCategoryItem, models.CASCADE) class NullableName(models.Model): name = models.CharField(max_length=20, null=True) class Meta: ordering = ['id'] class ModelD(models.Model): name = models.TextField() class ModelC(models.Model): name = models.TextField() class ModelB(models.Model): name = models.TextField() c = models.ForeignKey(ModelC, models.CASCADE) class ModelA(models.Model): name = models.TextField() b = models.ForeignKey(ModelB, models.SET_NULL, null=True) d = models.ForeignKey(ModelD, models.CASCADE) class Job(models.Model): name = models.CharField(max_length=20, unique=True) def __str__(self): return self.name class JobResponsibilities(models.Model): job = models.ForeignKey(Job, models.CASCADE, to_field='name') responsibility = models.ForeignKey('Responsibility', models.CASCADE, to_field='description') class Responsibility(models.Model): description = models.CharField(max_length=20, unique=True) jobs = models.ManyToManyField(Job, through=JobResponsibilities, related_name='responsibilities') def __str__(self): return self.description # Models for disjunction join promotion low level testing. class FK1(models.Model): f1 = models.TextField() f2 = models.TextField() class FK2(models.Model): f1 = models.TextField() f2 = models.TextField() class FK3(models.Model): f1 = models.TextField() f2 = models.TextField() class BaseA(models.Model): a = models.ForeignKey(FK1, models.SET_NULL, null=True) b = models.ForeignKey(FK2, models.SET_NULL, null=True) c = models.ForeignKey(FK3, models.SET_NULL, null=True) class Identifier(models.Model): name = models.CharField(max_length=100) def __str__(self): return self.name class Program(models.Model): identifier = models.OneToOneField(Identifier, models.CASCADE) class Channel(models.Model): programs = models.ManyToManyField(Program) identifier = models.OneToOneField(Identifier, models.CASCADE) class Book(models.Model): title = models.TextField() chapter = models.ForeignKey('Chapter', models.CASCADE) class Chapter(models.Model): title = models.TextField() paragraph = models.ForeignKey('Paragraph', models.CASCADE) class Paragraph(models.Model): text = models.TextField() page = models.ManyToManyField('Page') class Page(models.Model): text = models.TextField() class MyObject(models.Model): parent = models.ForeignKey('self', models.SET_NULL, null=True, blank=True, related_name='children') data = models.CharField(max_length=100) created_at = models.DateTimeField(auto_now_add=True) # Models for #17600 regressions class Order(models.Model): id = models.IntegerField(primary_key=True) name = models.CharField(max_length=12, null=True, default='') class Meta: ordering = ('pk',) def __str__(self): return '%s' % self.pk class OrderItem(models.Model): order = models.ForeignKey(Order, models.CASCADE, related_name='items') status = models.IntegerField() class Meta: ordering = ('pk',) def __str__(self): return '%s' % self.pk class BaseUser(models.Model): pass class Task(models.Model): title = models.CharField(max_length=10) owner = models.ForeignKey(BaseUser, models.CASCADE, related_name='owner') creator = models.ForeignKey(BaseUser, models.CASCADE, related_name='creator') def __str__(self): return self.title class Staff(models.Model): name = models.CharField(max_length=10) def __str__(self): return self.name class StaffUser(BaseUser): staff = models.OneToOneField(Staff, 
models.CASCADE, related_name='user')

    def __str__(self):
        # Return the related Staff's name; returning the Staff instance
        # itself would make __str__ return a non-string.
        return self.staff.name


class Ticket21203Parent(models.Model):
    parentid = models.AutoField(primary_key=True)
    parent_bool = models.BooleanField(default=True)
    created = models.DateTimeField(auto_now=True)


class Ticket21203Child(models.Model):
    childid = models.AutoField(primary_key=True)
    parent = models.ForeignKey(Ticket21203Parent, models.CASCADE)


class Person(models.Model):
    name = models.CharField(max_length=128)


class Company(models.Model):
    name = models.CharField(max_length=128)
    employees = models.ManyToManyField(Person, related_name='employers', through='Employment')

    def __str__(self):
        return self.name


class Employment(models.Model):
    employer = models.ForeignKey(Company, models.CASCADE)
    employee = models.ForeignKey(Person, models.CASCADE)
    title = models.CharField(max_length=128)


class School(models.Model):
    pass


class Student(models.Model):
    school = models.ForeignKey(School, models.CASCADE)


class Classroom(models.Model):
    name = models.CharField(max_length=20)
    has_blackboard = models.BooleanField(null=True)
    school = models.ForeignKey(School, models.CASCADE)
    students = models.ManyToManyField(Student, related_name='classroom')


class Teacher(models.Model):
    schools = models.ManyToManyField(School)
    friends = models.ManyToManyField('self')


class Ticket23605AParent(models.Model):
    pass


class Ticket23605A(Ticket23605AParent):
    pass


class Ticket23605B(models.Model):
    modela_fk = models.ForeignKey(Ticket23605A, models.CASCADE)
    modelc_fk = models.ForeignKey("Ticket23605C", models.CASCADE)
    field_b0 = models.IntegerField(null=True)
    field_b1 = models.BooleanField(default=False)


class Ticket23605C(models.Model):
    field_c0 = models.FloatField()


# db_table names have capital letters to ensure they are quoted in queries.
class Individual(models.Model):
    alive = models.BooleanField()

    class Meta:
        db_table = 'Individual'


class RelatedIndividual(models.Model):
    related = models.ForeignKey(Individual, models.CASCADE, related_name='related_individual')

    class Meta:
        db_table = 'RelatedIndividual'


class CustomDbColumn(models.Model):
    custom_column = models.IntegerField(db_column='custom_name', null=True)
    ip_address = models.GenericIPAddressField(null=True)
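
# A small illustrative sketch (commented out; only the table names above are
# assumed): because 'Individual' and 'RelatedIndividual' contain capital
# letters, the SQL Django generates for them must quote the table names,
# roughly:
#
#     str(Individual.objects.filter(related_individual__isnull=True).query)
#     # ... FROM "Individual" LEFT OUTER JOIN "RelatedIndividual" ON ...
#     #     WHERE "RelatedIndividual"."id" IS NULL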
from django.contrib.contenttypes.fields import ( GenericForeignKey, GenericRelation, ) from django.contrib.contenttypes.models import ContentType from django.db import models class NonIntegerAutoField(models.Model): creation_datetime = models.DateTimeField(primary_key=True) class Square(models.Model): root = models.IntegerField() square = models.PositiveIntegerField() def __str__(self): return "%s ** 2 == %s" % (self.root, self.square) class Person(models.Model): first_name = models.CharField(max_length=20) last_name = models.CharField(max_length=20) def __str__(self): return '%s %s' % (self.first_name, self.last_name) class SchoolClass(models.Model): year = models.PositiveIntegerField() day = models.CharField(max_length=9, blank=True) last_updated = models.DateTimeField() class VeryLongModelNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ(models.Model): primary_key_is_quite_long_zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz = models.AutoField(primary_key=True) charfield_is_quite_long_zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz = models.CharField(max_length=100) m2m_also_quite_long_zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz = models.ManyToManyField(Person, blank=True) class Tag(models.Model): name = models.CharField(max_length=30) content_type = models.ForeignKey(ContentType, models.CASCADE, related_name='backend_tags') object_id = models.PositiveIntegerField() content_object = GenericForeignKey('content_type', 'object_id') class Post(models.Model): name = models.CharField(max_length=30) text = models.TextField() tags = GenericRelation('Tag') class Meta: db_table = 'CaseSensitive_Post' class Reporter(models.Model): first_name = models.CharField(max_length=30) last_name = models.CharField(max_length=30) def __str__(self): return "%s %s" % (self.first_name, self.last_name) class ReporterProxy(Reporter): class Meta: proxy = True class Article(models.Model): headline = models.CharField(max_length=100) pub_date = models.DateField() reporter = models.ForeignKey(Reporter, models.CASCADE) reporter_proxy = models.ForeignKey( ReporterProxy, models.SET_NULL, null=True, related_name='reporter_proxy', ) def __str__(self): return self.headline class Item(models.Model): name = models.CharField(max_length=30) date = models.DateField() time = models.TimeField() last_modified = models.DateTimeField() def __str__(self): return self.name class Object(models.Model): related_objects = models.ManyToManyField("self", db_constraint=False, symmetrical=False) def __str__(self): return str(self.id) class ObjectReference(models.Model): obj = models.ForeignKey(Object, models.CASCADE, db_constraint=False) def __str__(self): return str(self.obj_id) class RawData(models.Model): raw_data = models.BinaryField() class Author(models.Model): name = models.CharField(max_length=255, unique=True) class Book(models.Model): author = models.ForeignKey(Author, models.CASCADE, to_field='name')
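
# A small usage sketch (commented out; illustrative only): with
# to_field='name', the book row stores the author's unique name rather than
# the integer pk:
#
#     a = Author.objects.create(name='Alice')
#     b = Book.objects.create(author=a)
#     assert b.author_id == 'Alice'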
import datetime import itertools import unittest from copy import copy from unittest import mock from django.core.management.color import no_style from django.db import ( DatabaseError, IntegrityError, OperationalError, connection, ) from django.db.models import Index, Model, Q from django.db.models.constraints import CheckConstraint, UniqueConstraint from django.db.models.deletion import CASCADE, PROTECT from django.db.models.fields import ( AutoField, BigAutoField, BigIntegerField, BinaryField, BooleanField, CharField, DateField, DateTimeField, IntegerField, PositiveIntegerField, SlugField, SmallAutoField, SmallIntegerField, TextField, TimeField, UUIDField, ) from django.db.models.fields.related import ( ForeignKey, ForeignObject, ManyToManyField, OneToOneField, ) from django.db.transaction import TransactionManagementError, atomic from django.db.utils import DataError from django.test import ( TransactionTestCase, skipIfDBFeature, skipUnlessDBFeature, ) from django.test.utils import CaptureQueriesContext, isolate_apps from django.utils import timezone from .fields import ( CustomManyToManyField, InheritedManyToManyField, MediumBlobField, ) from .models import ( Author, AuthorCharFieldWithIndex, AuthorTextFieldWithIndex, AuthorWithDefaultHeight, AuthorWithEvenLongerName, AuthorWithIndexedName, AuthorWithIndexedNameAndBirthday, AuthorWithUniqueName, AuthorWithUniqueNameAndBirthday, Book, BookForeignObj, BookWeak, BookWithLongName, BookWithO2O, BookWithoutAuthor, BookWithSlug, IntegerPK, Node, Note, NoteRename, Tag, TagIndexed, TagM2MTest, TagUniqueRename, Thing, UniqueTest, new_apps, ) class SchemaTests(TransactionTestCase): """ Tests for the schema-alteration code. Be aware that these tests are more liable than most to false results, as sometimes the code to check if a test has worked is almost as complex as the code it is testing. """ available_apps = [] models = [ Author, AuthorCharFieldWithIndex, AuthorTextFieldWithIndex, AuthorWithDefaultHeight, AuthorWithEvenLongerName, Book, BookWeak, BookWithLongName, BookWithO2O, BookWithSlug, IntegerPK, Node, Note, Tag, TagIndexed, TagM2MTest, TagUniqueRename, Thing, UniqueTest, ] # Utility functions def setUp(self): # local_models should contain test dependent model classes that will be # automatically removed from the app cache on test tear down. self.local_models = [] # isolated_local_models contains models that are in test methods # decorated with @isolate_apps. 
        self.isolated_local_models = []

    def tearDown(self):
        # Delete any tables made for our models
        self.delete_tables()
        new_apps.clear_cache()
        for model in new_apps.get_models():
            model._meta._expire_cache()
        if 'schema' in new_apps.all_models:
            for model in self.local_models:
                for many_to_many in model._meta.many_to_many:
                    through = many_to_many.remote_field.through
                    if through and through._meta.auto_created:
                        del new_apps.all_models['schema'][through._meta.model_name]
                del new_apps.all_models['schema'][model._meta.model_name]
        if self.isolated_local_models:
            with connection.schema_editor() as editor:
                for model in self.isolated_local_models:
                    editor.delete_model(model)

    def delete_tables(self):
        "Delete all model tables to leave a clean test environment."
        converter = connection.introspection.identifier_converter
        with connection.schema_editor() as editor:
            connection.disable_constraint_checking()
            table_names = connection.introspection.table_names()
            for model in itertools.chain(SchemaTests.models, self.local_models):
                tbl = converter(model._meta.db_table)
                if tbl in table_names:
                    editor.delete_model(model)
                    table_names.remove(tbl)
            connection.enable_constraint_checking()

    def column_classes(self, model):
        with connection.cursor() as cursor:
            columns = {
                d[0]: (connection.introspection.get_field_type(d[1], d), d)
                for d in connection.introspection.get_table_description(
                    cursor,
                    model._meta.db_table,
                )
            }
        # SQLite has a different format for field_type
        for name, (field_type, desc) in columns.items():
            if isinstance(field_type, tuple):
                columns[name] = (field_type[0], desc)
        # SQLite also doesn't error properly
        if not columns:
            raise DatabaseError("Table does not exist (empty pragma)")
        return columns

    def get_primary_key(self, table):
        with connection.cursor() as cursor:
            return connection.introspection.get_primary_key_column(cursor, table)

    def get_indexes(self, table):
        """
        Get the indexes on the table using a new cursor.
        """
        with connection.cursor() as cursor:
            return [
                c['columns'][0]
                for c in connection.introspection.get_constraints(cursor, table).values()
                if c['index'] and len(c['columns']) == 1
            ]

    def get_uniques(self, table):
        with connection.cursor() as cursor:
            return [
                c['columns'][0]
                for c in connection.introspection.get_constraints(cursor, table).values()
                if c['unique'] and len(c['columns']) == 1
            ]

    def get_constraints(self, table):
        """
        Get the constraints on a table using a new cursor.
        """
        with connection.cursor() as cursor:
            return connection.introspection.get_constraints(cursor, table)

    def get_constraints_for_column(self, model, column_name):
        constraints = self.get_constraints(model._meta.db_table)
        constraints_for_column = []
        for name, details in constraints.items():
            if details['columns'] == [column_name]:
                constraints_for_column.append(name)
        return sorted(constraints_for_column)

    def check_added_field_default(self, schema_editor, model, field, field_name, expected_default, cast_function=None):
        with connection.cursor() as cursor:
            schema_editor.add_field(model, field)
            cursor.execute("SELECT {} FROM {};".format(field_name, model._meta.db_table))
            database_default = cursor.fetchall()[0][0]
            if cast_function and type(database_default) is not type(expected_default):
                database_default = cast_function(database_default)
            self.assertEqual(database_default, expected_default)

    def get_constraints_count(self, table, column, fk_to):
        """
        Return a dict with keys 'fks', 'uniques', and 'indexes' indicating the
        number of foreign keys, unique constraints, and indexes on
        `table`.`column`.
The `fk_to` argument is a 2-tuple specifying the expected foreign key relationship's (table, column). """ with connection.cursor() as cursor: constraints = connection.introspection.get_constraints(cursor, table) counts = {'fks': 0, 'uniques': 0, 'indexes': 0} for c in constraints.values(): if c['columns'] == [column]: if c['foreign_key'] == fk_to: counts['fks'] += 1 if c['unique']: counts['uniques'] += 1 elif c['index']: counts['indexes'] += 1 return counts def assertIndexOrder(self, table, index, order): constraints = self.get_constraints(table) self.assertIn(index, constraints) index_orders = constraints[index]['orders'] self.assertTrue(all(val == expected for val, expected in zip(index_orders, order))) def assertForeignKeyExists(self, model, column, expected_fk_table, field='id'): """ Fail if the FK constraint on `model.Meta.db_table`.`column` to `expected_fk_table`.id doesn't exist. """ constraints = self.get_constraints(model._meta.db_table) constraint_fk = None for details in constraints.values(): if details['columns'] == [column] and details['foreign_key']: constraint_fk = details['foreign_key'] break self.assertEqual(constraint_fk, (expected_fk_table, field)) def assertForeignKeyNotExists(self, model, column, expected_fk_table): with self.assertRaises(AssertionError): self.assertForeignKeyExists(model, column, expected_fk_table) # Tests def test_creation_deletion(self): """ Tries creating a model's table, and then deleting it. """ with connection.schema_editor() as editor: # Create the table editor.create_model(Author) # The table is there list(Author.objects.all()) # Clean up that table editor.delete_model(Author) # No deferred SQL should be left over. self.assertEqual(editor.deferred_sql, []) # The table is gone with self.assertRaises(DatabaseError): list(Author.objects.all()) @skipUnlessDBFeature('supports_foreign_keys') def test_fk(self): "Creating tables out of FK order, then repointing, works" # Create the table with connection.schema_editor() as editor: editor.create_model(Book) editor.create_model(Author) editor.create_model(Tag) # Initial tables are there list(Author.objects.all()) list(Book.objects.all()) # Make sure the FK constraint is present with self.assertRaises(IntegrityError): Book.objects.create( author_id=1, title="Much Ado About Foreign Keys", pub_date=datetime.datetime.now(), ) # Repoint the FK constraint old_field = Book._meta.get_field("author") new_field = ForeignKey(Tag, CASCADE) new_field.set_attributes_from_name("author") with connection.schema_editor() as editor: editor.alter_field(Book, old_field, new_field, strict=True) self.assertForeignKeyExists(Book, 'author_id', 'schema_tag') @skipUnlessDBFeature('can_create_inline_fk') def test_inline_fk(self): # Create some tables. with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(Book) editor.create_model(Note) self.assertForeignKeyNotExists(Note, 'book_id', 'schema_book') # Add a foreign key from one to the other. with connection.schema_editor() as editor: new_field = ForeignKey(Book, CASCADE) new_field.set_attributes_from_name('book') editor.add_field(Note, new_field) self.assertForeignKeyExists(Note, 'book_id', 'schema_book') # Creating a FK field with a constraint uses a single statement without # a deferred ALTER TABLE. 
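        # (On backends with can_create_inline_fk, the REFERENCES clause is
        # emitted inline with the ADD COLUMN, so no ALTER TABLE ...
        # ADD CONSTRAINT statement should remain in deferred_sql.)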
self.assertFalse([ sql for sql in (str(statement) for statement in editor.deferred_sql) if sql.startswith('ALTER TABLE') and 'ADD CONSTRAINT' in sql ]) @skipUnlessDBFeature('supports_foreign_keys') def test_char_field_with_db_index_to_fk(self): # Create the table with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(AuthorCharFieldWithIndex) # Change CharField to FK old_field = AuthorCharFieldWithIndex._meta.get_field('char_field') new_field = ForeignKey(Author, CASCADE, blank=True) new_field.set_attributes_from_name('char_field') with connection.schema_editor() as editor: editor.alter_field(AuthorCharFieldWithIndex, old_field, new_field, strict=True) self.assertForeignKeyExists(AuthorCharFieldWithIndex, 'char_field_id', 'schema_author') @skipUnlessDBFeature('supports_foreign_keys') @skipUnlessDBFeature('supports_index_on_text_field') def test_text_field_with_db_index_to_fk(self): # Create the table with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(AuthorTextFieldWithIndex) # Change TextField to FK old_field = AuthorTextFieldWithIndex._meta.get_field('text_field') new_field = ForeignKey(Author, CASCADE, blank=True) new_field.set_attributes_from_name('text_field') with connection.schema_editor() as editor: editor.alter_field(AuthorTextFieldWithIndex, old_field, new_field, strict=True) self.assertForeignKeyExists(AuthorTextFieldWithIndex, 'text_field_id', 'schema_author') @skipUnlessDBFeature('supports_foreign_keys') def test_fk_to_proxy(self): "Creating a FK to a proxy model creates database constraints." class AuthorProxy(Author): class Meta: app_label = 'schema' apps = new_apps proxy = True class AuthorRef(Model): author = ForeignKey(AuthorProxy, on_delete=CASCADE) class Meta: app_label = 'schema' apps = new_apps self.local_models = [AuthorProxy, AuthorRef] # Create the table with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(AuthorRef) self.assertForeignKeyExists(AuthorRef, 'author_id', 'schema_author') @skipUnlessDBFeature('supports_foreign_keys') def test_fk_db_constraint(self): "The db_constraint parameter is respected" # Create the table with connection.schema_editor() as editor: editor.create_model(Tag) editor.create_model(Author) editor.create_model(BookWeak) # Initial tables are there list(Author.objects.all()) list(Tag.objects.all()) list(BookWeak.objects.all()) self.assertForeignKeyNotExists(BookWeak, 'author_id', 'schema_author') # Make a db_constraint=False FK new_field = ForeignKey(Tag, CASCADE, db_constraint=False) new_field.set_attributes_from_name("tag") with connection.schema_editor() as editor: editor.add_field(Author, new_field) self.assertForeignKeyNotExists(Author, 'tag_id', 'schema_tag') # Alter to one with a constraint new_field2 = ForeignKey(Tag, CASCADE) new_field2.set_attributes_from_name("tag") with connection.schema_editor() as editor: editor.alter_field(Author, new_field, new_field2, strict=True) self.assertForeignKeyExists(Author, 'tag_id', 'schema_tag') # Alter to one without a constraint again new_field2 = ForeignKey(Tag, CASCADE) new_field2.set_attributes_from_name("tag") with connection.schema_editor() as editor: editor.alter_field(Author, new_field2, new_field, strict=True) self.assertForeignKeyNotExists(Author, 'tag_id', 'schema_tag') @isolate_apps('schema') def test_no_db_constraint_added_during_primary_key_change(self): """ When a primary key that's pointed to by a ForeignKey with db_constraint=False is altered, a foreign key 
constraint isn't added. """ class Author(Model): class Meta: app_label = 'schema' class BookWeak(Model): author = ForeignKey(Author, CASCADE, db_constraint=False) class Meta: app_label = 'schema' with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(BookWeak) self.assertForeignKeyNotExists(BookWeak, 'author_id', 'schema_author') old_field = Author._meta.get_field('id') new_field = BigAutoField(primary_key=True) new_field.model = Author new_field.set_attributes_from_name('id') # @isolate_apps() and inner models are needed to have the model # relations populated, otherwise this doesn't act as a regression test. self.assertEqual(len(new_field.model._meta.related_objects), 1) with connection.schema_editor() as editor: editor.alter_field(Author, old_field, new_field, strict=True) self.assertForeignKeyNotExists(BookWeak, 'author_id', 'schema_author') def _test_m2m_db_constraint(self, M2MFieldClass): class LocalAuthorWithM2M(Model): name = CharField(max_length=255) class Meta: app_label = 'schema' apps = new_apps self.local_models = [LocalAuthorWithM2M] # Create the table with connection.schema_editor() as editor: editor.create_model(Tag) editor.create_model(LocalAuthorWithM2M) # Initial tables are there list(LocalAuthorWithM2M.objects.all()) list(Tag.objects.all()) # Make a db_constraint=False FK new_field = M2MFieldClass(Tag, related_name="authors", db_constraint=False) new_field.contribute_to_class(LocalAuthorWithM2M, "tags") # Add the field with connection.schema_editor() as editor: editor.add_field(LocalAuthorWithM2M, new_field) self.assertForeignKeyNotExists(new_field.remote_field.through, 'tag_id', 'schema_tag') @skipUnlessDBFeature('supports_foreign_keys') def test_m2m_db_constraint(self): self._test_m2m_db_constraint(ManyToManyField) @skipUnlessDBFeature('supports_foreign_keys') def test_m2m_db_constraint_custom(self): self._test_m2m_db_constraint(CustomManyToManyField) @skipUnlessDBFeature('supports_foreign_keys') def test_m2m_db_constraint_inherited(self): self._test_m2m_db_constraint(InheritedManyToManyField) def test_add_field(self): """ Tests adding fields to models """ # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Ensure there's no age field columns = self.column_classes(Author) self.assertNotIn("age", columns) # Add the new field new_field = IntegerField(null=True) new_field.set_attributes_from_name("age") with CaptureQueriesContext(connection) as ctx, connection.schema_editor() as editor: editor.add_field(Author, new_field) drop_default_sql = editor.sql_alter_column_no_default % { 'column': editor.quote_name(new_field.name), } self.assertFalse(any(drop_default_sql in query['sql'] for query in ctx.captured_queries)) # Ensure the field is right afterwards columns = self.column_classes(Author) self.assertEqual(columns['age'][0], "IntegerField") self.assertEqual(columns['age'][1][6], True) def test_add_field_remove_field(self): """ Adding a field and removing it removes all deferred sql referring to it. """ with connection.schema_editor() as editor: # Create a table with a unique constraint on the slug field. editor.create_model(Tag) # Remove the slug column. 
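            # (Any deferred SQL created for the slug's unique constraint must
            # be dropped along with the column, leaving deferred_sql empty.)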
            editor.remove_field(Tag, Tag._meta.get_field('slug'))
        self.assertEqual(editor.deferred_sql, [])

    def test_add_field_temp_default(self):
        """
        Tests adding fields to models with a temporary default
        """
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        # Ensure there's no age field
        columns = self.column_classes(Author)
        self.assertNotIn("age", columns)
        # Add some rows of data
        Author.objects.create(name="Andrew", height=30)
        Author.objects.create(name="Andrea")
        # Add a not-null field
        new_field = CharField(max_length=30, default="Godwin")
        new_field.set_attributes_from_name("surname")
        with connection.schema_editor() as editor:
            editor.add_field(Author, new_field)
        # Ensure the field is right afterwards
        columns = self.column_classes(Author)
        self.assertEqual(columns['surname'][0], "CharField")
        self.assertEqual(columns['surname'][1][6], connection.features.interprets_empty_strings_as_nulls)

    def test_add_field_temp_default_boolean(self):
        """
        Tests adding fields to models with a temporary default where
        the default is False. (#21783)
        """
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        # Ensure there's no age field
        columns = self.column_classes(Author)
        self.assertNotIn("age", columns)
        # Add some rows of data
        Author.objects.create(name="Andrew", height=30)
        Author.objects.create(name="Andrea")
        # Add a not-null field
        new_field = BooleanField(default=False)
        new_field.set_attributes_from_name("awesome")
        with connection.schema_editor() as editor:
            editor.add_field(Author, new_field)
        # Ensure the field is right afterwards
        columns = self.column_classes(Author)
        # BooleanFields are stored as TINYINT(1) on MySQL.
        field_type = columns['awesome'][0]
        self.assertEqual(field_type, connection.features.introspected_boolean_field_type)

    def test_add_field_default_transform(self):
        """
        Tests adding fields to models with a default that is not directly
        valid in the database (#22581)
        """
        class TestTransformField(IntegerField):

            # Weird field that saves the count of items in its value
            def get_default(self):
                return self.default

            def get_prep_value(self, value):
                if value is None:
                    return 0
                return len(value)

        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        # Add some rows of data
        Author.objects.create(name="Andrew", height=30)
        Author.objects.create(name="Andrea")
        # Add the field with a default it needs to cast (to an integer in this case)
        new_field = TestTransformField(default={1: 2})
        new_field.set_attributes_from_name("thing")
        with connection.schema_editor() as editor:
            editor.add_field(Author, new_field)
        # Ensure the field is there
        columns = self.column_classes(Author)
        field_type, field_info = columns['thing']
        self.assertEqual(field_type, 'IntegerField')
        # Make sure the values were transformed correctly
        self.assertEqual(Author.objects.extra(where=["thing = 1"]).count(), 2)

    def test_add_field_binary(self):
        """
        Tests binary fields get a sane default (#22851)
        """
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        # Add the new field
        new_field = BinaryField(blank=True)
        new_field.set_attributes_from_name("bits")
        with connection.schema_editor() as editor:
            editor.add_field(Author, new_field)
        # Ensure the field is right afterwards
        columns = self.column_classes(Author)
        # MySQL annoyingly uses the same backend, so it'll come back as one of
        # these two types.
self.assertIn(columns['bits'][0], ("BinaryField", "TextField")) @unittest.skipUnless(connection.vendor == 'mysql', "MySQL specific") def test_add_binaryfield_mediumblob(self): """ Test adding a custom-sized binary field on MySQL (#24846). """ # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Add the new field with default new_field = MediumBlobField(blank=True, default=b'123') new_field.set_attributes_from_name('bits') with connection.schema_editor() as editor: editor.add_field(Author, new_field) columns = self.column_classes(Author) # Introspection treats BLOBs as TextFields self.assertEqual(columns['bits'][0], "TextField") def test_alter(self): """ Tests simple altering of fields """ # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Ensure the field is right to begin with columns = self.column_classes(Author) self.assertEqual(columns['name'][0], "CharField") self.assertEqual(bool(columns['name'][1][6]), bool(connection.features.interprets_empty_strings_as_nulls)) # Alter the name field to a TextField old_field = Author._meta.get_field("name") new_field = TextField(null=True) new_field.set_attributes_from_name("name") with connection.schema_editor() as editor: editor.alter_field(Author, old_field, new_field, strict=True) # Ensure the field is right afterwards columns = self.column_classes(Author) self.assertEqual(columns['name'][0], "TextField") self.assertEqual(columns['name'][1][6], True) # Change nullability again new_field2 = TextField(null=False) new_field2.set_attributes_from_name("name") with connection.schema_editor() as editor: editor.alter_field(Author, new_field, new_field2, strict=True) # Ensure the field is right afterwards columns = self.column_classes(Author) self.assertEqual(columns['name'][0], "TextField") self.assertEqual(bool(columns['name'][1][6]), bool(connection.features.interprets_empty_strings_as_nulls)) def test_alter_auto_field_to_integer_field(self): # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Change AutoField to IntegerField old_field = Author._meta.get_field('id') new_field = IntegerField(primary_key=True) new_field.set_attributes_from_name('id') new_field.model = Author with connection.schema_editor() as editor: editor.alter_field(Author, old_field, new_field, strict=True) def test_alter_auto_field_to_char_field(self): # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Change AutoField to CharField old_field = Author._meta.get_field('id') new_field = CharField(primary_key=True, max_length=50) new_field.set_attributes_from_name('id') new_field.model = Author with connection.schema_editor() as editor: editor.alter_field(Author, old_field, new_field, strict=True) @isolate_apps('schema') def test_alter_auto_field_quoted_db_column(self): class Foo(Model): id = AutoField(primary_key=True, db_column='"quoted_id"') class Meta: app_label = 'schema' with connection.schema_editor() as editor: editor.create_model(Foo) self.isolated_local_models = [Foo] old_field = Foo._meta.get_field('id') new_field = BigAutoField(primary_key=True) new_field.model = Foo new_field.db_column = '"quoted_id"' new_field.set_attributes_from_name('id') with connection.schema_editor() as editor: editor.alter_field(Foo, old_field, new_field, strict=True) Foo.objects.create() def test_alter_not_unique_field_to_primary_key(self): # Create the table. 
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        # Change UUIDField to primary key.
        old_field = Author._meta.get_field('uuid')
        new_field = UUIDField(primary_key=True)
        new_field.set_attributes_from_name('uuid')
        new_field.model = Author
        with connection.schema_editor() as editor:
            editor.remove_field(Author, Author._meta.get_field('id'))
            editor.alter_field(Author, old_field, new_field, strict=True)

    @isolate_apps('schema')
    def test_alter_primary_key_quoted_db_table(self):
        class Foo(Model):
            class Meta:
                app_label = 'schema'
                db_table = '"foo"'

        with connection.schema_editor() as editor:
            editor.create_model(Foo)
        self.isolated_local_models = [Foo]
        old_field = Foo._meta.get_field('id')
        new_field = BigAutoField(primary_key=True)
        new_field.model = Foo
        new_field.set_attributes_from_name('id')
        with connection.schema_editor() as editor:
            editor.alter_field(Foo, old_field, new_field, strict=True)
        Foo.objects.create()

    def test_alter_text_field(self):
        # Regression for "BLOB/TEXT column 'info' can't have a default value"
        # on MySQL.
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Note)
        old_field = Note._meta.get_field("info")
        new_field = TextField(blank=True)
        new_field.set_attributes_from_name("info")
        with connection.schema_editor() as editor:
            editor.alter_field(Note, old_field, new_field, strict=True)

    @skipUnlessDBFeature('can_defer_constraint_checks', 'can_rollback_ddl')
    def test_alter_fk_checks_deferred_constraints(self):
        """
        #25492 - Altering a foreign key's structure and data in the same
        transaction.
        """
        with connection.schema_editor() as editor:
            editor.create_model(Node)
        old_field = Node._meta.get_field('parent')
        new_field = ForeignKey(Node, CASCADE)
        new_field.set_attributes_from_name('parent')
        parent = Node.objects.create()
        with connection.schema_editor() as editor:
            # Update the parent FK to create a deferred constraint check.
            Node.objects.update(parent=parent)
            editor.alter_field(Node, old_field, new_field, strict=True)

    def test_alter_text_field_to_date_field(self):
        """
        #25002 - Test conversion of text field to date field.
        """
        with connection.schema_editor() as editor:
            editor.create_model(Note)
        Note.objects.create(info='1988-05-05')
        old_field = Note._meta.get_field('info')
        new_field = DateField(blank=True)
        new_field.set_attributes_from_name('info')
        with connection.schema_editor() as editor:
            editor.alter_field(Note, old_field, new_field, strict=True)
        # Make sure the field isn't nullable
        columns = self.column_classes(Note)
        self.assertFalse(columns['info'][1][6])

    def test_alter_text_field_to_datetime_field(self):
        """
        #25002 - Test conversion of text field to datetime field.
        """
        with connection.schema_editor() as editor:
            editor.create_model(Note)
        Note.objects.create(info='1988-05-05 3:16:17.4567')
        old_field = Note._meta.get_field('info')
        new_field = DateTimeField(blank=True)
        new_field.set_attributes_from_name('info')
        with connection.schema_editor() as editor:
            editor.alter_field(Note, old_field, new_field, strict=True)
        # Make sure the field isn't nullable
        columns = self.column_classes(Note)
        self.assertFalse(columns['info'][1][6])

    def test_alter_text_field_to_time_field(self):
        """
        #25002 - Test conversion of text field to time field.
""" with connection.schema_editor() as editor: editor.create_model(Note) Note.objects.create(info='3:16:17.4567') old_field = Note._meta.get_field('info') new_field = TimeField(blank=True) new_field.set_attributes_from_name('info') with connection.schema_editor() as editor: editor.alter_field(Note, old_field, new_field, strict=True) # Make sure the field isn't nullable columns = self.column_classes(Note) self.assertFalse(columns['info'][1][6]) @skipIfDBFeature('interprets_empty_strings_as_nulls') def test_alter_textual_field_keep_null_status(self): """ Changing a field type shouldn't affect the not null status. """ with connection.schema_editor() as editor: editor.create_model(Note) with self.assertRaises(IntegrityError): Note.objects.create(info=None) old_field = Note._meta.get_field("info") new_field = CharField(max_length=50) new_field.set_attributes_from_name("info") with connection.schema_editor() as editor: editor.alter_field(Note, old_field, new_field, strict=True) with self.assertRaises(IntegrityError): Note.objects.create(info=None) def test_alter_numeric_field_keep_null_status(self): """ Changing a field type shouldn't affect the not null status. """ with connection.schema_editor() as editor: editor.create_model(UniqueTest) with self.assertRaises(IntegrityError): UniqueTest.objects.create(year=None, slug='aaa') old_field = UniqueTest._meta.get_field("year") new_field = BigIntegerField() new_field.set_attributes_from_name("year") with connection.schema_editor() as editor: editor.alter_field(UniqueTest, old_field, new_field, strict=True) with self.assertRaises(IntegrityError): UniqueTest.objects.create(year=None, slug='bbb') def test_alter_null_to_not_null(self): """ #23609 - Tests handling of default values when altering from NULL to NOT NULL. """ # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Ensure the field is right to begin with columns = self.column_classes(Author) self.assertTrue(columns['height'][1][6]) # Create some test data Author.objects.create(name='Not null author', height=12) Author.objects.create(name='Null author') # Verify null value self.assertEqual(Author.objects.get(name='Not null author').height, 12) self.assertIsNone(Author.objects.get(name='Null author').height) # Alter the height field to NOT NULL with default old_field = Author._meta.get_field("height") new_field = PositiveIntegerField(default=42) new_field.set_attributes_from_name("height") with connection.schema_editor() as editor: editor.alter_field(Author, old_field, new_field, strict=True) # Ensure the field is right afterwards columns = self.column_classes(Author) self.assertFalse(columns['height'][1][6]) # Verify default value self.assertEqual(Author.objects.get(name='Not null author').height, 12) self.assertEqual(Author.objects.get(name='Null author').height, 42) def test_alter_charfield_to_null(self): """ #24307 - Should skip an alter statement on databases with interprets_empty_strings_as_null when changing a CharField to null. """ # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Change the CharField to null old_field = Author._meta.get_field('name') new_field = copy(old_field) new_field.null = True with connection.schema_editor() as editor: editor.alter_field(Author, old_field, new_field, strict=True) @unittest.skipUnless(connection.vendor == 'postgresql', 'PostgreSQL specific') def test_alter_char_field_decrease_length(self): # Create the table. 
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        Author.objects.create(name='x' * 255)
        # Change max_length of CharField.
        old_field = Author._meta.get_field('name')
        new_field = CharField(max_length=254)
        new_field.set_attributes_from_name('name')
        with connection.schema_editor() as editor:
            msg = 'value too long for type character varying(254)'
            with self.assertRaisesMessage(DataError, msg):
                editor.alter_field(Author, old_field, new_field, strict=True)

    def test_alter_textfield_to_null(self):
        """
        #24307 - Should skip an alter statement on databases with
        interprets_empty_strings_as_nulls when changing a TextField to null.
        """
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Note)
        # Change the TextField to null
        old_field = Note._meta.get_field('info')
        new_field = copy(old_field)
        new_field.null = True
        with connection.schema_editor() as editor:
            editor.alter_field(Note, old_field, new_field, strict=True)

    @skipUnlessDBFeature('supports_combined_alters')
    def test_alter_null_to_not_null_keeping_default(self):
        """
        #23738 - Can change a nullable field with default to non-nullable
        with the same default.
        """
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(AuthorWithDefaultHeight)
        # Ensure the field is right to begin with
        columns = self.column_classes(AuthorWithDefaultHeight)
        self.assertTrue(columns['height'][1][6])
        # Alter the height field to NOT NULL keeping the previous default
        old_field = AuthorWithDefaultHeight._meta.get_field("height")
        new_field = PositiveIntegerField(default=42)
        new_field.set_attributes_from_name("height")
        with connection.schema_editor() as editor:
            editor.alter_field(AuthorWithDefaultHeight, old_field, new_field, strict=True)
        # Ensure the field is right afterwards
        columns = self.column_classes(AuthorWithDefaultHeight)
        self.assertFalse(columns['height'][1][6])

    @skipUnlessDBFeature('supports_foreign_keys')
    def test_alter_fk(self):
        """
        Tests altering of FKs
        """
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Author)
            editor.create_model(Book)
        # Ensure the field is right to begin with
        columns = self.column_classes(Book)
        self.assertEqual(columns['author_id'][0], "IntegerField")
        self.assertForeignKeyExists(Book, 'author_id', 'schema_author')
        # Alter the FK
        old_field = Book._meta.get_field("author")
        new_field = ForeignKey(Author, CASCADE, editable=False)
        new_field.set_attributes_from_name("author")
        with connection.schema_editor() as editor:
            editor.alter_field(Book, old_field, new_field, strict=True)
        # Ensure the field is right afterwards
        columns = self.column_classes(Book)
        self.assertEqual(columns['author_id'][0], "IntegerField")
        self.assertForeignKeyExists(Book, 'author_id', 'schema_author')

    @skipUnlessDBFeature('supports_foreign_keys')
    def test_alter_to_fk(self):
        """
        #24447 - Tests adding a FK constraint for an existing column
        """
        class LocalBook(Model):
            author = IntegerField()
            title = CharField(max_length=100, db_index=True)
            pub_date = DateTimeField()

            class Meta:
                app_label = 'schema'
                apps = new_apps

        self.local_models = [LocalBook]
        # Create the tables
        with connection.schema_editor() as editor:
            editor.create_model(Author)
            editor.create_model(LocalBook)
        # Ensure no FK constraint exists
        constraints = self.get_constraints(LocalBook._meta.db_table)
        for details in constraints.values():
            if details['foreign_key']:
                self.fail('Found an unexpected FK constraint to %s' % details['columns'])
        old_field = LocalBook._meta.get_field("author")
        new_field = ForeignKey(Author, CASCADE)
        new_field.set_attributes_from_name("author")
        with connection.schema_editor() as editor:
            editor.alter_field(LocalBook, old_field, new_field, strict=True)
        self.assertForeignKeyExists(LocalBook, 'author_id', 'schema_author')

    @skipUnlessDBFeature('supports_foreign_keys')
    def test_alter_o2o_to_fk(self):
        """
        #24163 - Tests altering of OneToOneField to ForeignKey
        """
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Author)
            editor.create_model(BookWithO2O)
        # Ensure the field is right to begin with
        columns = self.column_classes(BookWithO2O)
        self.assertEqual(columns['author_id'][0], "IntegerField")
        # Ensure the field is unique
        author = Author.objects.create(name="Joe")
        BookWithO2O.objects.create(author=author, title="Django 1", pub_date=datetime.datetime.now())
        with self.assertRaises(IntegrityError):
            BookWithO2O.objects.create(author=author, title="Django 2", pub_date=datetime.datetime.now())
        BookWithO2O.objects.all().delete()
        self.assertForeignKeyExists(BookWithO2O, 'author_id', 'schema_author')
        # Alter the OneToOneField to ForeignKey
        old_field = BookWithO2O._meta.get_field("author")
        new_field = ForeignKey(Author, CASCADE)
        new_field.set_attributes_from_name("author")
        with connection.schema_editor() as editor:
            editor.alter_field(BookWithO2O, old_field, new_field, strict=True)
        # Ensure the field is right afterwards
        columns = self.column_classes(Book)
        self.assertEqual(columns['author_id'][0], "IntegerField")
        # Ensure the field is not unique anymore
        Book.objects.create(author=author, title="Django 1", pub_date=datetime.datetime.now())
        Book.objects.create(author=author, title="Django 2", pub_date=datetime.datetime.now())
        self.assertForeignKeyExists(Book, 'author_id', 'schema_author')

    @skipUnlessDBFeature('supports_foreign_keys')
    def test_alter_fk_to_o2o(self):
        """
        #24163 - Tests altering of ForeignKey to OneToOneField
        """
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Author)
            editor.create_model(Book)
        # Ensure the field is right to begin with
        columns = self.column_classes(Book)
        self.assertEqual(columns['author_id'][0], "IntegerField")
        # Ensure the field is not unique
        author = Author.objects.create(name="Joe")
        Book.objects.create(author=author, title="Django 1", pub_date=datetime.datetime.now())
        Book.objects.create(author=author, title="Django 2", pub_date=datetime.datetime.now())
        Book.objects.all().delete()
        self.assertForeignKeyExists(Book, 'author_id', 'schema_author')
        # Alter the ForeignKey to OneToOneField
        old_field = Book._meta.get_field("author")
        new_field = OneToOneField(Author, CASCADE)
        new_field.set_attributes_from_name("author")
        with connection.schema_editor() as editor:
            editor.alter_field(Book, old_field, new_field, strict=True)
        # Ensure the field is right afterwards
        columns = self.column_classes(BookWithO2O)
        self.assertEqual(columns['author_id'][0], "IntegerField")
        # Ensure the field is unique now
        BookWithO2O.objects.create(author=author, title="Django 1", pub_date=datetime.datetime.now())
        with self.assertRaises(IntegrityError):
            BookWithO2O.objects.create(author=author, title="Django 2", pub_date=datetime.datetime.now())
        self.assertForeignKeyExists(BookWithO2O, 'author_id', 'schema_author')

    def test_alter_field_fk_to_o2o(self):
        with connection.schema_editor() as editor:
            editor.create_model(Author)
            editor.create_model(Book)
        expected_fks = 1 if connection.features.supports_foreign_keys else 0
        # Check the index is right to begin with.
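        # get_constraints_count() is a helper of this test suite; judging by
        # its use below, it returns a dict tallying the 'fks', 'uniques', and
        # 'indexes' present on the given column.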
        counts = self.get_constraints_count(
            Book._meta.db_table,
            Book._meta.get_field('author').column,
            (Author._meta.db_table, Author._meta.pk.column),
        )
        self.assertEqual(counts, {'fks': expected_fks, 'uniques': 0, 'indexes': 1})
        old_field = Book._meta.get_field('author')
        new_field = OneToOneField(Author, CASCADE)
        new_field.set_attributes_from_name('author')
        with connection.schema_editor() as editor:
            editor.alter_field(Book, old_field, new_field, strict=True)
        counts = self.get_constraints_count(
            Book._meta.db_table,
            Book._meta.get_field('author').column,
            (Author._meta.db_table, Author._meta.pk.column),
        )
        # The index on ForeignKey is replaced with a unique constraint for
        # OneToOneField.
        self.assertEqual(counts, {'fks': expected_fks, 'uniques': 1, 'indexes': 0})

    def test_alter_field_fk_keeps_index(self):
        with connection.schema_editor() as editor:
            editor.create_model(Author)
            editor.create_model(Book)
        expected_fks = 1 if connection.features.supports_foreign_keys else 0
        # Check the index is right to begin with.
        counts = self.get_constraints_count(
            Book._meta.db_table,
            Book._meta.get_field('author').column,
            (Author._meta.db_table, Author._meta.pk.column),
        )
        self.assertEqual(counts, {'fks': expected_fks, 'uniques': 0, 'indexes': 1})
        old_field = Book._meta.get_field('author')
        # on_delete changed from CASCADE.
        new_field = ForeignKey(Author, PROTECT)
        new_field.set_attributes_from_name('author')
        with connection.schema_editor() as editor:
            editor.alter_field(Book, old_field, new_field, strict=True)
        counts = self.get_constraints_count(
            Book._meta.db_table,
            Book._meta.get_field('author').column,
            (Author._meta.db_table, Author._meta.pk.column),
        )
        # The index remains.
        self.assertEqual(counts, {'fks': expected_fks, 'uniques': 0, 'indexes': 1})

    def test_alter_field_o2o_to_fk(self):
        with connection.schema_editor() as editor:
            editor.create_model(Author)
            editor.create_model(BookWithO2O)
        expected_fks = 1 if connection.features.supports_foreign_keys else 0
        # Check the unique constraint is right to begin with.
        counts = self.get_constraints_count(
            BookWithO2O._meta.db_table,
            BookWithO2O._meta.get_field('author').column,
            (Author._meta.db_table, Author._meta.pk.column),
        )
        self.assertEqual(counts, {'fks': expected_fks, 'uniques': 1, 'indexes': 0})
        old_field = BookWithO2O._meta.get_field('author')
        new_field = ForeignKey(Author, CASCADE)
        new_field.set_attributes_from_name('author')
        with connection.schema_editor() as editor:
            editor.alter_field(BookWithO2O, old_field, new_field, strict=True)
        counts = self.get_constraints_count(
            BookWithO2O._meta.db_table,
            BookWithO2O._meta.get_field('author').column,
            (Author._meta.db_table, Author._meta.pk.column),
        )
        # The unique constraint on OneToOneField is replaced with an index for
        # ForeignKey.
        self.assertEqual(counts, {'fks': expected_fks, 'uniques': 0, 'indexes': 1})

    def test_alter_field_o2o_keeps_unique(self):
        with connection.schema_editor() as editor:
            editor.create_model(Author)
            editor.create_model(BookWithO2O)
        expected_fks = 1 if connection.features.supports_foreign_keys else 0
        # Check the unique constraint is right to begin with.
        counts = self.get_constraints_count(
            BookWithO2O._meta.db_table,
            BookWithO2O._meta.get_field('author').column,
            (Author._meta.db_table, Author._meta.pk.column),
        )
        self.assertEqual(counts, {'fks': expected_fks, 'uniques': 1, 'indexes': 0})
        old_field = BookWithO2O._meta.get_field('author')
        # on_delete changed from CASCADE.
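        # Only on_delete changes here, which is enforced in Python rather than
        # in the database, so the schema editor should leave the unique
        # constraint in place (verified below).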
        new_field = OneToOneField(Author, PROTECT)
        new_field.set_attributes_from_name('author')
        with connection.schema_editor() as editor:
            editor.alter_field(BookWithO2O, old_field, new_field, strict=True)
        counts = self.get_constraints_count(
            BookWithO2O._meta.db_table,
            BookWithO2O._meta.get_field('author').column,
            (Author._meta.db_table, Author._meta.pk.column),
        )
        # The unique constraint remains.
        self.assertEqual(counts, {'fks': expected_fks, 'uniques': 1, 'indexes': 0})

    def test_alter_db_table_case(self):
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        # Alter the case of the table
        old_table_name = Author._meta.db_table
        with connection.schema_editor() as editor:
            editor.alter_db_table(Author, old_table_name, old_table_name.upper())

    def test_alter_implicit_id_to_explicit(self):
        """
        Should be able to convert an implicit "id" field to an explicit "id"
        primary key field.
        """
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        old_field = Author._meta.get_field("id")
        new_field = AutoField(primary_key=True)
        new_field.set_attributes_from_name("id")
        new_field.model = Author
        with connection.schema_editor() as editor:
            editor.alter_field(Author, old_field, new_field, strict=True)
        # This will fail if DROP DEFAULT is inadvertently executed on this
        # field which drops the id sequence, at least on PostgreSQL.
        Author.objects.create(name='Foo')
        Author.objects.create(name='Bar')

    def test_alter_autofield_pk_to_bigautofield_pk_sequence_owner(self):
        """
        Converting an implicit PK to BigAutoField(primary_key=True) should
        keep a sequence owner on PostgreSQL.
        """
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        old_field = Author._meta.get_field('id')
        new_field = BigAutoField(primary_key=True)
        new_field.set_attributes_from_name('id')
        new_field.model = Author
        with connection.schema_editor() as editor:
            editor.alter_field(Author, old_field, new_field, strict=True)
        Author.objects.create(name='Foo', pk=1)
        with connection.cursor() as cursor:
            sequence_reset_sqls = connection.ops.sequence_reset_sql(no_style(), [Author])
            if sequence_reset_sqls:
                cursor.execute(sequence_reset_sqls[0])
        # Fail on PostgreSQL if sequence is missing an owner.
        self.assertIsNotNone(Author.objects.create(name='Bar'))

    def test_alter_autofield_pk_to_smallautofield_pk_sequence_owner(self):
        """
        Converting an implicit PK to SmallAutoField(primary_key=True) should
        keep a sequence owner on PostgreSQL.
        """
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        old_field = Author._meta.get_field('id')
        new_field = SmallAutoField(primary_key=True)
        new_field.set_attributes_from_name('id')
        new_field.model = Author
        with connection.schema_editor() as editor:
            editor.alter_field(Author, old_field, new_field, strict=True)
        Author.objects.create(name='Foo', pk=1)
        with connection.cursor() as cursor:
            sequence_reset_sqls = connection.ops.sequence_reset_sql(no_style(), [Author])
            if sequence_reset_sqls:
                cursor.execute(sequence_reset_sqls[0])
        # Fail on PostgreSQL if sequence is missing an owner.
        self.assertIsNotNone(Author.objects.create(name='Bar'))

    def test_alter_int_pk_to_autofield_pk(self):
        """
        Should be able to alter an IntegerField(primary_key=True) to
        AutoField(primary_key=True).
""" with connection.schema_editor() as editor: editor.create_model(IntegerPK) old_field = IntegerPK._meta.get_field('i') new_field = AutoField(primary_key=True) new_field.model = IntegerPK new_field.set_attributes_from_name('i') with connection.schema_editor() as editor: editor.alter_field(IntegerPK, old_field, new_field, strict=True) def test_alter_int_pk_to_bigautofield_pk(self): """ Should be able to rename an IntegerField(primary_key=True) to BigAutoField(primary_key=True). """ with connection.schema_editor() as editor: editor.create_model(IntegerPK) old_field = IntegerPK._meta.get_field('i') new_field = BigAutoField(primary_key=True) new_field.model = IntegerPK new_field.set_attributes_from_name('i') with connection.schema_editor() as editor: editor.alter_field(IntegerPK, old_field, new_field, strict=True) @isolate_apps('schema') def test_alter_smallint_pk_to_smallautofield_pk(self): """ Should be able to rename an SmallIntegerField(primary_key=True) to SmallAutoField(primary_key=True). """ class SmallIntegerPK(Model): i = SmallIntegerField(primary_key=True) class Meta: app_label = 'schema' with connection.schema_editor() as editor: editor.create_model(SmallIntegerPK) self.isolated_local_models = [SmallIntegerPK] old_field = SmallIntegerPK._meta.get_field('i') new_field = SmallAutoField(primary_key=True) new_field.model = SmallIntegerPK new_field.set_attributes_from_name('i') with connection.schema_editor() as editor: editor.alter_field(SmallIntegerPK, old_field, new_field, strict=True) def test_alter_int_pk_to_int_unique(self): """ Should be able to rename an IntegerField(primary_key=True) to IntegerField(unique=True). """ with connection.schema_editor() as editor: editor.create_model(IntegerPK) # Delete the old PK old_field = IntegerPK._meta.get_field('i') new_field = IntegerField(unique=True) new_field.model = IntegerPK new_field.set_attributes_from_name('i') with connection.schema_editor() as editor: editor.alter_field(IntegerPK, old_field, new_field, strict=True) # The primary key constraint is gone. Result depends on database: # 'id' for SQLite, None for others (must not be 'i'). self.assertIn(self.get_primary_key(IntegerPK._meta.db_table), ('id', None)) # Set up a model class as it currently stands. The original IntegerPK # class is now out of date and some backends make use of the whole # model class when modifying a field (such as sqlite3 when remaking a # table) so an outdated model class leads to incorrect results. class Transitional(Model): i = IntegerField(unique=True) j = IntegerField(unique=True) class Meta: app_label = 'schema' apps = new_apps db_table = 'INTEGERPK' # model requires a new PK old_field = Transitional._meta.get_field('j') new_field = IntegerField(primary_key=True) new_field.model = Transitional new_field.set_attributes_from_name('j') with connection.schema_editor() as editor: editor.alter_field(Transitional, old_field, new_field, strict=True) # Create a model class representing the updated model. class IntegerUnique(Model): i = IntegerField(unique=True) j = IntegerField(primary_key=True) class Meta: app_label = 'schema' apps = new_apps db_table = 'INTEGERPK' # Ensure unique constraint works. 
        IntegerUnique.objects.create(i=1, j=1)
        with self.assertRaises(IntegrityError):
            IntegerUnique.objects.create(i=1, j=2)

    def test_rename(self):
        """
        Tests simple renaming of fields
        """
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        # Ensure the field is right to begin with
        columns = self.column_classes(Author)
        self.assertEqual(columns['name'][0], "CharField")
        self.assertNotIn("display_name", columns)
        # Alter the name field's name
        old_field = Author._meta.get_field("name")
        new_field = CharField(max_length=254)
        new_field.set_attributes_from_name("display_name")
        with connection.schema_editor() as editor:
            editor.alter_field(Author, old_field, new_field, strict=True)
        # Ensure the field is right afterwards
        columns = self.column_classes(Author)
        self.assertEqual(columns['display_name'][0], "CharField")
        self.assertNotIn("name", columns)

    @isolate_apps('schema')
    def test_rename_referenced_field(self):
        class Author(Model):
            name = CharField(max_length=255, unique=True)

            class Meta:
                app_label = 'schema'

        class Book(Model):
            author = ForeignKey(Author, CASCADE, to_field='name')

            class Meta:
                app_label = 'schema'

        with connection.schema_editor() as editor:
            editor.create_model(Author)
            editor.create_model(Book)
        new_field = CharField(max_length=255, unique=True)
        new_field.set_attributes_from_name('renamed')
        with connection.schema_editor(atomic=connection.features.supports_atomic_references_rename) as editor:
            editor.alter_field(Author, Author._meta.get_field('name'), new_field)
        # Ensure the foreign key reference was updated.
        self.assertForeignKeyExists(Book, 'author_id', 'schema_author', 'renamed')

    @skipIfDBFeature('interprets_empty_strings_as_nulls')
    def test_rename_keep_null_status(self):
        """
        Renaming a field shouldn't affect the not null status.
""" with connection.schema_editor() as editor: editor.create_model(Note) with self.assertRaises(IntegrityError): Note.objects.create(info=None) old_field = Note._meta.get_field("info") new_field = TextField() new_field.set_attributes_from_name("detail_info") with connection.schema_editor() as editor: editor.alter_field(Note, old_field, new_field, strict=True) columns = self.column_classes(Note) self.assertEqual(columns['detail_info'][0], "TextField") self.assertNotIn("info", columns) with self.assertRaises(IntegrityError): NoteRename.objects.create(detail_info=None) def _test_m2m_create(self, M2MFieldClass): """ Tests M2M fields on models during creation """ class LocalBookWithM2M(Model): author = ForeignKey(Author, CASCADE) title = CharField(max_length=100, db_index=True) pub_date = DateTimeField() tags = M2MFieldClass("TagM2MTest", related_name="books") class Meta: app_label = 'schema' apps = new_apps self.local_models = [LocalBookWithM2M] # Create the tables with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(TagM2MTest) editor.create_model(LocalBookWithM2M) # Ensure there is now an m2m table there columns = self.column_classes(LocalBookWithM2M._meta.get_field("tags").remote_field.through) self.assertEqual(columns['tagm2mtest_id'][0], "IntegerField") def test_m2m_create(self): self._test_m2m_create(ManyToManyField) def test_m2m_create_custom(self): self._test_m2m_create(CustomManyToManyField) def test_m2m_create_inherited(self): self._test_m2m_create(InheritedManyToManyField) def _test_m2m_create_through(self, M2MFieldClass): """ Tests M2M fields on models during creation with through models """ class LocalTagThrough(Model): book = ForeignKey("schema.LocalBookWithM2MThrough", CASCADE) tag = ForeignKey("schema.TagM2MTest", CASCADE) class Meta: app_label = 'schema' apps = new_apps class LocalBookWithM2MThrough(Model): tags = M2MFieldClass("TagM2MTest", related_name="books", through=LocalTagThrough) class Meta: app_label = 'schema' apps = new_apps self.local_models = [LocalTagThrough, LocalBookWithM2MThrough] # Create the tables with connection.schema_editor() as editor: editor.create_model(LocalTagThrough) editor.create_model(TagM2MTest) editor.create_model(LocalBookWithM2MThrough) # Ensure there is now an m2m table there columns = self.column_classes(LocalTagThrough) self.assertEqual(columns['book_id'][0], "IntegerField") self.assertEqual(columns['tag_id'][0], "IntegerField") def test_m2m_create_through(self): self._test_m2m_create_through(ManyToManyField) def test_m2m_create_through_custom(self): self._test_m2m_create_through(CustomManyToManyField) def test_m2m_create_through_inherited(self): self._test_m2m_create_through(InheritedManyToManyField) def _test_m2m(self, M2MFieldClass): """ Tests adding/removing M2M fields on models """ class LocalAuthorWithM2M(Model): name = CharField(max_length=255) class Meta: app_label = 'schema' apps = new_apps self.local_models = [LocalAuthorWithM2M] # Create the tables with connection.schema_editor() as editor: editor.create_model(LocalAuthorWithM2M) editor.create_model(TagM2MTest) # Create an M2M field new_field = M2MFieldClass("schema.TagM2MTest", related_name="authors") new_field.contribute_to_class(LocalAuthorWithM2M, "tags") # Ensure there's no m2m table there with self.assertRaises(DatabaseError): self.column_classes(new_field.remote_field.through) # Add the field with connection.schema_editor() as editor: editor.add_field(LocalAuthorWithM2M, new_field) # Ensure there is now an m2m table there columns = 
self.column_classes(new_field.remote_field.through) self.assertEqual(columns['tagm2mtest_id'][0], "IntegerField") # "Alter" the field. This should not rename the DB table to itself. with connection.schema_editor() as editor: editor.alter_field(LocalAuthorWithM2M, new_field, new_field, strict=True) # Remove the M2M table again with connection.schema_editor() as editor: editor.remove_field(LocalAuthorWithM2M, new_field) # Ensure there's no m2m table there with self.assertRaises(DatabaseError): self.column_classes(new_field.remote_field.through) # Make sure the model state is coherent with the table one now that # we've removed the tags field. opts = LocalAuthorWithM2M._meta opts.local_many_to_many.remove(new_field) del new_apps.all_models['schema'][new_field.remote_field.through._meta.model_name] opts._expire_cache() def test_m2m(self): self._test_m2m(ManyToManyField) def test_m2m_custom(self): self._test_m2m(CustomManyToManyField) def test_m2m_inherited(self): self._test_m2m(InheritedManyToManyField) def _test_m2m_through_alter(self, M2MFieldClass): """ Tests altering M2Ms with explicit through models (should no-op) """ class LocalAuthorTag(Model): author = ForeignKey("schema.LocalAuthorWithM2MThrough", CASCADE) tag = ForeignKey("schema.TagM2MTest", CASCADE) class Meta: app_label = 'schema' apps = new_apps class LocalAuthorWithM2MThrough(Model): name = CharField(max_length=255) tags = M2MFieldClass("schema.TagM2MTest", related_name="authors", through=LocalAuthorTag) class Meta: app_label = 'schema' apps = new_apps self.local_models = [LocalAuthorTag, LocalAuthorWithM2MThrough] # Create the tables with connection.schema_editor() as editor: editor.create_model(LocalAuthorTag) editor.create_model(LocalAuthorWithM2MThrough) editor.create_model(TagM2MTest) # Ensure the m2m table is there self.assertEqual(len(self.column_classes(LocalAuthorTag)), 3) # "Alter" the field's blankness. This should not actually do anything. 
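        # An M2M with an explicit through model owns no columns of its own,
        # so (per the docstring above) the alter below is expected to leave
        # the intermediate table untouched.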
        old_field = LocalAuthorWithM2MThrough._meta.get_field("tags")
        new_field = M2MFieldClass("schema.TagM2MTest", related_name="authors", through=LocalAuthorTag)
        new_field.contribute_to_class(LocalAuthorWithM2MThrough, "tags")
        with connection.schema_editor() as editor:
            editor.alter_field(LocalAuthorWithM2MThrough, old_field, new_field, strict=True)
        # Ensure the m2m table is still there
        self.assertEqual(len(self.column_classes(LocalAuthorTag)), 3)

    def test_m2m_through_alter(self):
        self._test_m2m_through_alter(ManyToManyField)

    def test_m2m_through_alter_custom(self):
        self._test_m2m_through_alter(CustomManyToManyField)

    def test_m2m_through_alter_inherited(self):
        self._test_m2m_through_alter(InheritedManyToManyField)

    def _test_m2m_repoint(self, M2MFieldClass):
        """
        Tests repointing M2M fields
        """
        class LocalBookWithM2M(Model):
            author = ForeignKey(Author, CASCADE)
            title = CharField(max_length=100, db_index=True)
            pub_date = DateTimeField()
            tags = M2MFieldClass("TagM2MTest", related_name="books")

            class Meta:
                app_label = 'schema'
                apps = new_apps

        self.local_models = [LocalBookWithM2M]
        # Create the tables
        with connection.schema_editor() as editor:
            editor.create_model(Author)
            editor.create_model(LocalBookWithM2M)
            editor.create_model(TagM2MTest)
            editor.create_model(UniqueTest)
        # Ensure the M2M exists and points to TagM2MTest
        if connection.features.supports_foreign_keys:
            self.assertForeignKeyExists(
                LocalBookWithM2M._meta.get_field("tags").remote_field.through,
                'tagm2mtest_id',
                'schema_tagm2mtest',
            )
        # Repoint the M2M
        old_field = LocalBookWithM2M._meta.get_field("tags")
        new_field = M2MFieldClass(UniqueTest)
        new_field.contribute_to_class(LocalBookWithM2M, "uniques")
        with connection.schema_editor() as editor:
            editor.alter_field(LocalBookWithM2M, old_field, new_field, strict=True)
        # Ensure old M2M is gone
        with self.assertRaises(DatabaseError):
            self.column_classes(LocalBookWithM2M._meta.get_field("tags").remote_field.through)

        # This model looks like the new model and is used for teardown.
        opts = LocalBookWithM2M._meta
        opts.local_many_to_many.remove(old_field)
        # Ensure the new M2M exists and points to UniqueTest
        if connection.features.supports_foreign_keys:
            self.assertForeignKeyExists(new_field.remote_field.through, 'uniquetest_id', 'schema_uniquetest')

    def test_m2m_repoint(self):
        self._test_m2m_repoint(ManyToManyField)

    def test_m2m_repoint_custom(self):
        self._test_m2m_repoint(CustomManyToManyField)

    def test_m2m_repoint_inherited(self):
        self._test_m2m_repoint(InheritedManyToManyField)

    @isolate_apps('schema')
    def test_m2m_rename_field_in_target_model(self):
        class LocalTagM2MTest(Model):
            title = CharField(max_length=255)

            class Meta:
                app_label = 'schema'

        class LocalM2M(Model):
            tags = ManyToManyField(LocalTagM2MTest)

            class Meta:
                app_label = 'schema'

        # Create the tables.
        with connection.schema_editor() as editor:
            editor.create_model(LocalM2M)
            editor.create_model(LocalTagM2MTest)
        self.isolated_local_models = [LocalM2M, LocalTagM2MTest]
        # Ensure the m2m table is there.
        self.assertEqual(len(self.column_classes(LocalM2M)), 1)
        # Alter a field in LocalTagM2MTest.
        old_field = LocalTagM2MTest._meta.get_field('title')
        new_field = CharField(max_length=254)
        new_field.contribute_to_class(LocalTagM2MTest, 'title1')
        # @isolate_apps() and inner models are needed to have the model
        # relations populated, otherwise this doesn't act as a regression test.
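        # (The one related object asserted below is the reverse side of
        # LocalM2M.tags pointing back at LocalTagM2MTest.)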
        self.assertEqual(len(new_field.model._meta.related_objects), 1)
        with connection.schema_editor() as editor:
            editor.alter_field(LocalTagM2MTest, old_field, new_field, strict=True)
        # Ensure the m2m table is still there.
        self.assertEqual(len(self.column_classes(LocalM2M)), 1)

    @skipUnlessDBFeature('supports_column_check_constraints', 'can_introspect_check_constraints')
    def test_check_constraints(self):
        """
        Tests creating/deleting CHECK constraints
        """
        # Create the tables
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        # Ensure the constraint exists
        constraints = self.get_constraints(Author._meta.db_table)
        if not any(details['columns'] == ['height'] and details['check'] for details in constraints.values()):
            self.fail("No check constraint for height found")
        # Alter the column to remove it
        old_field = Author._meta.get_field("height")
        new_field = IntegerField(null=True, blank=True)
        new_field.set_attributes_from_name("height")
        with connection.schema_editor() as editor:
            editor.alter_field(Author, old_field, new_field, strict=True)
        constraints = self.get_constraints(Author._meta.db_table)
        for details in constraints.values():
            if details['columns'] == ["height"] and details['check']:
                self.fail("Check constraint for height found")
        # Alter the column to re-add it
        new_field2 = Author._meta.get_field("height")
        with connection.schema_editor() as editor:
            editor.alter_field(Author, new_field, new_field2, strict=True)
        constraints = self.get_constraints(Author._meta.db_table)
        if not any(details['columns'] == ['height'] and details['check'] for details in constraints.values()):
            self.fail("No check constraint for height found")

    @skipUnlessDBFeature('supports_column_check_constraints', 'can_introspect_check_constraints')
    def test_remove_field_check_does_not_remove_meta_constraints(self):
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        # Add the custom check constraint
        constraint = CheckConstraint(check=Q(height__gte=0), name='author_height_gte_0_check')
        custom_constraint_name = constraint.name
        Author._meta.constraints = [constraint]
        with connection.schema_editor() as editor:
            editor.add_constraint(Author, constraint)
        # Ensure the constraints exist
        constraints = self.get_constraints(Author._meta.db_table)
        self.assertIn(custom_constraint_name, constraints)
        other_constraints = [
            name for name, details in constraints.items()
            if details['columns'] == ['height'] and details['check'] and name != custom_constraint_name
        ]
        self.assertEqual(len(other_constraints), 1)
        # Alter the column to remove field check
        old_field = Author._meta.get_field('height')
        new_field = IntegerField(null=True, blank=True)
        new_field.set_attributes_from_name('height')
        with connection.schema_editor() as editor:
            editor.alter_field(Author, old_field, new_field, strict=True)
        constraints = self.get_constraints(Author._meta.db_table)
        self.assertIn(custom_constraint_name, constraints)
        other_constraints = [
            name for name, details in constraints.items()
            if details['columns'] == ['height'] and details['check'] and name != custom_constraint_name
        ]
        self.assertEqual(len(other_constraints), 0)
        # Alter the column to re-add field check
        new_field2 = Author._meta.get_field('height')
        with connection.schema_editor() as editor:
            editor.alter_field(Author, new_field, new_field2, strict=True)
        constraints = self.get_constraints(Author._meta.db_table)
        self.assertIn(custom_constraint_name, constraints)
        other_constraints = [
            name for name, details in constraints.items()
            if details['columns'] == ['height'] and details['check'] and name != custom_constraint_name
        ]
        self.assertEqual(len(other_constraints), 1)
        # Drop the check constraint
        with connection.schema_editor() as editor:
            Author._meta.constraints = []
            editor.remove_constraint(Author, constraint)

    def test_unique(self):
        """
        Tests removing and adding unique constraints to a single column.
        """
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Tag)
        # Ensure the field is unique to begin with
        Tag.objects.create(title="foo", slug="foo")
        with self.assertRaises(IntegrityError):
            Tag.objects.create(title="bar", slug="foo")
        Tag.objects.all().delete()
        # Alter the slug field to be non-unique
        old_field = Tag._meta.get_field("slug")
        new_field = SlugField(unique=False)
        new_field.set_attributes_from_name("slug")
        with connection.schema_editor() as editor:
            editor.alter_field(Tag, old_field, new_field, strict=True)
        # Ensure the field is no longer unique
        Tag.objects.create(title="foo", slug="foo")
        Tag.objects.create(title="bar", slug="foo")
        Tag.objects.all().delete()
        # Alter the slug field to be unique
        new_field2 = SlugField(unique=True)
        new_field2.set_attributes_from_name("slug")
        with connection.schema_editor() as editor:
            editor.alter_field(Tag, new_field, new_field2, strict=True)
        # Ensure the field is unique again
        Tag.objects.create(title="foo", slug="foo")
        with self.assertRaises(IntegrityError):
            Tag.objects.create(title="bar", slug="foo")
        Tag.objects.all().delete()
        # Rename the field
        new_field3 = SlugField(unique=True)
        new_field3.set_attributes_from_name("slug2")
        with connection.schema_editor() as editor:
            editor.alter_field(Tag, new_field2, new_field3, strict=True)
        # Ensure the field is still unique
        TagUniqueRename.objects.create(title="foo", slug2="foo")
        with self.assertRaises(IntegrityError):
            TagUniqueRename.objects.create(title="bar", slug2="foo")
        Tag.objects.all().delete()

    def test_unique_name_quoting(self):
        old_table_name = TagUniqueRename._meta.db_table
        try:
            with connection.schema_editor() as editor:
                editor.create_model(TagUniqueRename)
                editor.alter_db_table(TagUniqueRename, old_table_name, 'unique-table')
                TagUniqueRename._meta.db_table = 'unique-table'
                # This fails if the unique index name isn't quoted.
                editor.alter_unique_together(TagUniqueRename, [], (('title', 'slug2'),))
        finally:
            TagUniqueRename._meta.db_table = old_table_name

    @isolate_apps('schema')
    @unittest.skipIf(connection.vendor == 'sqlite', 'SQLite naively remakes the table on field alteration.')
    @skipUnlessDBFeature('supports_foreign_keys')
    def test_unique_no_unnecessary_fk_drops(self):
        """
        If AlterField isn't selective about dropping foreign key constraints
        when modifying a field with a unique constraint, the AlterField
        incorrectly drops and recreates the Book.author foreign key even
        though it doesn't restrict the field being changed (#29193).
        """
        class Author(Model):
            name = CharField(max_length=254, unique=True)

            class Meta:
                app_label = 'schema'

        class Book(Model):
            author = ForeignKey(Author, CASCADE)

            class Meta:
                app_label = 'schema'

        with connection.schema_editor() as editor:
            editor.create_model(Author)
            editor.create_model(Book)
        new_field = CharField(max_length=255, unique=True)
        new_field.model = Author
        new_field.set_attributes_from_name('name')
        with self.assertLogs('django.db.backends.schema', 'DEBUG') as cm:
            with connection.schema_editor() as editor:
                editor.alter_field(Author, Author._meta.get_field('name'), new_field)
        # One SQL statement is executed to alter the field.
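        # (The schema editor logs each statement it executes to the
        # 'django.db.backends.schema' logger, so one captured record means
        # exactly one DDL statement was run.)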
        self.assertEqual(len(cm.records), 1)

    @isolate_apps('schema')
    @unittest.skipIf(connection.vendor == 'sqlite', 'SQLite remakes the table on field alteration.')
    def test_unique_and_reverse_m2m(self):
        """
        AlterField can modify a unique field when there's a reverse M2M
        relation on the model.
        """
        class Tag(Model):
            title = CharField(max_length=255)
            slug = SlugField(unique=True)

            class Meta:
                app_label = 'schema'

        class Book(Model):
            tags = ManyToManyField(Tag, related_name='books')

            class Meta:
                app_label = 'schema'

        self.isolated_local_models = [Book._meta.get_field('tags').remote_field.through]
        with connection.schema_editor() as editor:
            editor.create_model(Tag)
            editor.create_model(Book)
        new_field = SlugField(max_length=75, unique=True)
        new_field.model = Tag
        new_field.set_attributes_from_name('slug')
        with self.assertLogs('django.db.backends.schema', 'DEBUG') as cm:
            with connection.schema_editor() as editor:
                editor.alter_field(Tag, Tag._meta.get_field('slug'), new_field)
        # One SQL statement is executed to alter the field.
        self.assertEqual(len(cm.records), 1)
        # Ensure that the field is still unique.
        Tag.objects.create(title='foo', slug='foo')
        with self.assertRaises(IntegrityError):
            Tag.objects.create(title='bar', slug='foo')

    @skipUnlessDBFeature('allows_multiple_constraints_on_same_fields')
    def test_remove_field_unique_does_not_remove_meta_constraints(self):
        with connection.schema_editor() as editor:
            editor.create_model(AuthorWithUniqueName)
        # Add the custom unique constraint
        constraint = UniqueConstraint(fields=['name'], name='author_name_uniq')
        custom_constraint_name = constraint.name
        AuthorWithUniqueName._meta.constraints = [constraint]
        with connection.schema_editor() as editor:
            editor.add_constraint(AuthorWithUniqueName, constraint)
        # Ensure the constraints exist
        constraints = self.get_constraints(AuthorWithUniqueName._meta.db_table)
        self.assertIn(custom_constraint_name, constraints)
        other_constraints = [
            name for name, details in constraints.items()
            if details['columns'] == ['name'] and details['unique'] and name != custom_constraint_name
        ]
        self.assertEqual(len(other_constraints), 1)
        # Alter the column to remove field uniqueness
        old_field = AuthorWithUniqueName._meta.get_field('name')
        new_field = CharField(max_length=255)
        new_field.set_attributes_from_name('name')
        with connection.schema_editor() as editor:
            editor.alter_field(AuthorWithUniqueName, old_field, new_field, strict=True)
        constraints = self.get_constraints(AuthorWithUniqueName._meta.db_table)
        self.assertIn(custom_constraint_name, constraints)
        other_constraints = [
            name for name, details in constraints.items()
            if details['columns'] == ['name'] and details['unique'] and name != custom_constraint_name
        ]
        self.assertEqual(len(other_constraints), 0)
        # Alter the column to re-add field uniqueness
        new_field2 = AuthorWithUniqueName._meta.get_field('name')
        with connection.schema_editor() as editor:
            editor.alter_field(AuthorWithUniqueName, new_field, new_field2, strict=True)
        constraints = self.get_constraints(AuthorWithUniqueName._meta.db_table)
        self.assertIn(custom_constraint_name, constraints)
        other_constraints = [
            name for name, details in constraints.items()
            if details['columns'] == ['name'] and details['unique'] and name != custom_constraint_name
        ]
        self.assertEqual(len(other_constraints), 1)
        # Drop the unique constraint
        with connection.schema_editor() as editor:
            AuthorWithUniqueName._meta.constraints = []
            editor.remove_constraint(AuthorWithUniqueName, constraint)

    def test_unique_together(self):
        """
        Tests removing and adding unique_together constraints on a model.
        """
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(UniqueTest)
        # Ensure the fields are unique to begin with
        UniqueTest.objects.create(year=2012, slug="foo")
        UniqueTest.objects.create(year=2011, slug="foo")
        UniqueTest.objects.create(year=2011, slug="bar")
        with self.assertRaises(IntegrityError):
            UniqueTest.objects.create(year=2012, slug="foo")
        UniqueTest.objects.all().delete()
        # Alter the model to its non-unique-together companion
        with connection.schema_editor() as editor:
            editor.alter_unique_together(UniqueTest, UniqueTest._meta.unique_together, [])
        # Ensure the fields are no longer unique
        UniqueTest.objects.create(year=2012, slug="foo")
        UniqueTest.objects.create(year=2012, slug="foo")
        UniqueTest.objects.all().delete()
        # Alter it back
        new_field2 = SlugField(unique=True)
        new_field2.set_attributes_from_name("slug")
        with connection.schema_editor() as editor:
            editor.alter_unique_together(UniqueTest, [], UniqueTest._meta.unique_together)
        # Ensure the fields are unique again
        UniqueTest.objects.create(year=2012, slug="foo")
        with self.assertRaises(IntegrityError):
            UniqueTest.objects.create(year=2012, slug="foo")
        UniqueTest.objects.all().delete()

    def test_unique_together_with_fk(self):
        """
        Tests removing and adding unique_together constraints that include
        a foreign key.
        """
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Author)
            editor.create_model(Book)
        # Ensure the fields aren't unique together to begin with
        self.assertEqual(Book._meta.unique_together, ())
        # Add the unique_together constraint
        with connection.schema_editor() as editor:
            editor.alter_unique_together(Book, [], [['author', 'title']])
        # Alter it back
        with connection.schema_editor() as editor:
            editor.alter_unique_together(Book, [['author', 'title']], [])

    def test_unique_together_with_fk_with_existing_index(self):
        """
        Tests removing and adding unique_together constraints that include
        a foreign key, where the foreign key is added after the model is
        created.
""" # Create the tables with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(BookWithoutAuthor) new_field = ForeignKey(Author, CASCADE) new_field.set_attributes_from_name('author') editor.add_field(BookWithoutAuthor, new_field) # Ensure the fields aren't unique to begin with self.assertEqual(Book._meta.unique_together, ()) # Add the unique_together constraint with connection.schema_editor() as editor: editor.alter_unique_together(Book, [], [['author', 'title']]) # Alter it back with connection.schema_editor() as editor: editor.alter_unique_together(Book, [['author', 'title']], []) @skipUnlessDBFeature('allows_multiple_constraints_on_same_fields') def test_remove_unique_together_does_not_remove_meta_constraints(self): with connection.schema_editor() as editor: editor.create_model(AuthorWithUniqueNameAndBirthday) # Add the custom unique constraint constraint = UniqueConstraint(fields=['name', 'birthday'], name='author_name_birthday_uniq') custom_constraint_name = constraint.name AuthorWithUniqueNameAndBirthday._meta.constraints = [constraint] with connection.schema_editor() as editor: editor.add_constraint(AuthorWithUniqueNameAndBirthday, constraint) # Ensure the constraints exist constraints = self.get_constraints(AuthorWithUniqueNameAndBirthday._meta.db_table) self.assertIn(custom_constraint_name, constraints) other_constraints = [ name for name, details in constraints.items() if details['columns'] == ['name', 'birthday'] and details['unique'] and name != custom_constraint_name ] self.assertEqual(len(other_constraints), 1) # Remove unique together unique_together = AuthorWithUniqueNameAndBirthday._meta.unique_together with connection.schema_editor() as editor: editor.alter_unique_together(AuthorWithUniqueNameAndBirthday, unique_together, []) constraints = self.get_constraints(AuthorWithUniqueNameAndBirthday._meta.db_table) self.assertIn(custom_constraint_name, constraints) other_constraints = [ name for name, details in constraints.items() if details['columns'] == ['name', 'birthday'] and details['unique'] and name != custom_constraint_name ] self.assertEqual(len(other_constraints), 0) # Re-add unique together with connection.schema_editor() as editor: editor.alter_unique_together(AuthorWithUniqueNameAndBirthday, [], unique_together) constraints = self.get_constraints(AuthorWithUniqueNameAndBirthday._meta.db_table) self.assertIn(custom_constraint_name, constraints) other_constraints = [ name for name, details in constraints.items() if details['columns'] == ['name', 'birthday'] and details['unique'] and name != custom_constraint_name ] self.assertEqual(len(other_constraints), 1) # Drop the unique constraint with connection.schema_editor() as editor: AuthorWithUniqueNameAndBirthday._meta.constraints = [] editor.remove_constraint(AuthorWithUniqueNameAndBirthday, constraint) def test_index_together(self): """ Tests removing and adding index_together constraints on a model. 
""" # Create the table with connection.schema_editor() as editor: editor.create_model(Tag) # Ensure there's no index on the year/slug columns first self.assertEqual( False, any( c["index"] for c in self.get_constraints("schema_tag").values() if c['columns'] == ["slug", "title"] ), ) # Alter the model to add an index with connection.schema_editor() as editor: editor.alter_index_together(Tag, [], [("slug", "title")]) # Ensure there is now an index self.assertEqual( True, any( c["index"] for c in self.get_constraints("schema_tag").values() if c['columns'] == ["slug", "title"] ), ) # Alter it back new_field2 = SlugField(unique=True) new_field2.set_attributes_from_name("slug") with connection.schema_editor() as editor: editor.alter_index_together(Tag, [("slug", "title")], []) # Ensure there's no index self.assertEqual( False, any( c["index"] for c in self.get_constraints("schema_tag").values() if c['columns'] == ["slug", "title"] ), ) def test_index_together_with_fk(self): """ Tests removing and adding index_together constraints that include a foreign key. """ # Create the table with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(Book) # Ensure the fields are unique to begin with self.assertEqual(Book._meta.index_together, ()) # Add the unique_together constraint with connection.schema_editor() as editor: editor.alter_index_together(Book, [], [['author', 'title']]) # Alter it back with connection.schema_editor() as editor: editor.alter_index_together(Book, [['author', 'title']], []) def test_create_index_together(self): """ Tests creating models with index_together already defined """ # Create the table with connection.schema_editor() as editor: editor.create_model(TagIndexed) # Ensure there is an index self.assertEqual( True, any( c["index"] for c in self.get_constraints("schema_tagindexed").values() if c['columns'] == ["slug", "title"] ), ) @skipUnlessDBFeature('allows_multiple_constraints_on_same_fields') def test_remove_index_together_does_not_remove_meta_indexes(self): with connection.schema_editor() as editor: editor.create_model(AuthorWithIndexedNameAndBirthday) # Add the custom index index = Index(fields=['name', 'birthday'], name='author_name_birthday_idx') custom_index_name = index.name AuthorWithIndexedNameAndBirthday._meta.indexes = [index] with connection.schema_editor() as editor: editor.add_index(AuthorWithIndexedNameAndBirthday, index) # Ensure the indexes exist constraints = self.get_constraints(AuthorWithIndexedNameAndBirthday._meta.db_table) self.assertIn(custom_index_name, constraints) other_constraints = [ name for name, details in constraints.items() if details['columns'] == ['name', 'birthday'] and details['index'] and name != custom_index_name ] self.assertEqual(len(other_constraints), 1) # Remove index together index_together = AuthorWithIndexedNameAndBirthday._meta.index_together with connection.schema_editor() as editor: editor.alter_index_together(AuthorWithIndexedNameAndBirthday, index_together, []) constraints = self.get_constraints(AuthorWithIndexedNameAndBirthday._meta.db_table) self.assertIn(custom_index_name, constraints) other_constraints = [ name for name, details in constraints.items() if details['columns'] == ['name', 'birthday'] and details['index'] and name != custom_index_name ] self.assertEqual(len(other_constraints), 0) # Re-add index together with connection.schema_editor() as editor: editor.alter_index_together(AuthorWithIndexedNameAndBirthday, [], index_together) constraints = 
self.get_constraints(AuthorWithIndexedNameAndBirthday._meta.db_table) self.assertIn(custom_index_name, constraints) other_constraints = [ name for name, details in constraints.items() if details['columns'] == ['name', 'birthday'] and details['index'] and name != custom_index_name ] self.assertEqual(len(other_constraints), 1) # Drop the index with connection.schema_editor() as editor: AuthorWithIndexedNameAndBirthday._meta.indexes = [] editor.remove_index(AuthorWithIndexedNameAndBirthday, index) @isolate_apps('schema') def test_db_table(self): """ Tests renaming of the table """ class Author(Model): name = CharField(max_length=255) class Meta: app_label = 'schema' class Book(Model): author = ForeignKey(Author, CASCADE) class Meta: app_label = 'schema' # Create the table and one referring it. with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(Book) # Ensure the table is there to begin with columns = self.column_classes(Author) self.assertEqual(columns['name'][0], "CharField") # Alter the table with connection.schema_editor(atomic=connection.features.supports_atomic_references_rename) as editor: editor.alter_db_table(Author, "schema_author", "schema_otherauthor") # Ensure the table is there afterwards Author._meta.db_table = "schema_otherauthor" columns = self.column_classes(Author) self.assertEqual(columns['name'][0], "CharField") # Ensure the foreign key reference was updated self.assertForeignKeyExists(Book, "author_id", "schema_otherauthor") # Alter the table again with connection.schema_editor(atomic=connection.features.supports_atomic_references_rename) as editor: editor.alter_db_table(Author, "schema_otherauthor", "schema_author") # Ensure the table is still there Author._meta.db_table = "schema_author" columns = self.column_classes(Author) self.assertEqual(columns['name'][0], "CharField") def test_add_remove_index(self): """ Tests index addition and removal """ # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Ensure the table is there and has no index self.assertNotIn('title', self.get_indexes(Author._meta.db_table)) # Add the index index = Index(fields=['name'], name='author_title_idx') with connection.schema_editor() as editor: editor.add_index(Author, index) self.assertIn('name', self.get_indexes(Author._meta.db_table)) # Drop the index with connection.schema_editor() as editor: editor.remove_index(Author, index) self.assertNotIn('name', self.get_indexes(Author._meta.db_table)) def test_remove_db_index_doesnt_remove_custom_indexes(self): """ Changing db_index to False doesn't remove indexes from Meta.indexes. 
""" with connection.schema_editor() as editor: editor.create_model(AuthorWithIndexedName) # Ensure the table has its index self.assertIn('name', self.get_indexes(AuthorWithIndexedName._meta.db_table)) # Add the custom index index = Index(fields=['-name'], name='author_name_idx') author_index_name = index.name with connection.schema_editor() as editor: db_index_name = editor._create_index_name( table_name=AuthorWithIndexedName._meta.db_table, column_names=('name',), ) try: AuthorWithIndexedName._meta.indexes = [index] with connection.schema_editor() as editor: editor.add_index(AuthorWithIndexedName, index) old_constraints = self.get_constraints(AuthorWithIndexedName._meta.db_table) self.assertIn(author_index_name, old_constraints) self.assertIn(db_index_name, old_constraints) # Change name field to db_index=False old_field = AuthorWithIndexedName._meta.get_field('name') new_field = CharField(max_length=255) new_field.set_attributes_from_name('name') with connection.schema_editor() as editor: editor.alter_field(AuthorWithIndexedName, old_field, new_field, strict=True) new_constraints = self.get_constraints(AuthorWithIndexedName._meta.db_table) self.assertNotIn(db_index_name, new_constraints) # The index from Meta.indexes is still in the database. self.assertIn(author_index_name, new_constraints) # Drop the index with connection.schema_editor() as editor: editor.remove_index(AuthorWithIndexedName, index) finally: AuthorWithIndexedName._meta.indexes = [] def test_order_index(self): """ Indexes defined with ordering (ASC/DESC) defined on column """ with connection.schema_editor() as editor: editor.create_model(Author) # The table doesn't have an index self.assertNotIn('title', self.get_indexes(Author._meta.db_table)) index_name = 'author_name_idx' # Add the index index = Index(fields=['name', '-weight'], name=index_name) with connection.schema_editor() as editor: editor.add_index(Author, index) if connection.features.supports_index_column_ordering: self.assertIndexOrder(Author._meta.db_table, index_name, ['ASC', 'DESC']) # Drop the index with connection.schema_editor() as editor: editor.remove_index(Author, index) def test_indexes(self): """ Tests creation/altering of indexes """ # Create the table with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(Book) # Ensure the table is there and has the right index self.assertIn( "title", self.get_indexes(Book._meta.db_table), ) # Alter to remove the index old_field = Book._meta.get_field("title") new_field = CharField(max_length=100, db_index=False) new_field.set_attributes_from_name("title") with connection.schema_editor() as editor: editor.alter_field(Book, old_field, new_field, strict=True) # Ensure the table is there and has no index self.assertNotIn( "title", self.get_indexes(Book._meta.db_table), ) # Alter to re-add the index new_field2 = Book._meta.get_field("title") with connection.schema_editor() as editor: editor.alter_field(Book, new_field, new_field2, strict=True) # Ensure the table is there and has the index again self.assertIn( "title", self.get_indexes(Book._meta.db_table), ) # Add a unique column, verify that creates an implicit index new_field3 = BookWithSlug._meta.get_field("slug") with connection.schema_editor() as editor: editor.add_field(Book, new_field3) self.assertIn( "slug", self.get_uniques(Book._meta.db_table), ) # Remove the unique, check the index goes with it new_field4 = CharField(max_length=20, unique=False) new_field4.set_attributes_from_name("slug") with connection.schema_editor() 
as editor: editor.alter_field(BookWithSlug, new_field3, new_field4, strict=True) self.assertNotIn( "slug", self.get_uniques(Book._meta.db_table), ) def test_text_field_with_db_index(self): with connection.schema_editor() as editor: editor.create_model(AuthorTextFieldWithIndex) # The text_field index is present if the database supports it. assertion = self.assertIn if connection.features.supports_index_on_text_field else self.assertNotIn assertion('text_field', self.get_indexes(AuthorTextFieldWithIndex._meta.db_table)) def test_primary_key(self): """ Tests altering of the primary key """ # Create the table with connection.schema_editor() as editor: editor.create_model(Tag) # Ensure the table is there and has the right PK self.assertEqual(self.get_primary_key(Tag._meta.db_table), 'id') # Alter to change the PK id_field = Tag._meta.get_field("id") old_field = Tag._meta.get_field("slug") new_field = SlugField(primary_key=True) new_field.set_attributes_from_name("slug") new_field.model = Tag with connection.schema_editor() as editor: editor.remove_field(Tag, id_field) editor.alter_field(Tag, old_field, new_field) # Ensure the PK changed self.assertNotIn( 'id', self.get_indexes(Tag._meta.db_table), ) self.assertEqual(self.get_primary_key(Tag._meta.db_table), 'slug') def test_context_manager_exit(self): """ Ensures transaction is correctly closed when an error occurs inside a SchemaEditor context. """ class SomeError(Exception): pass try: with connection.schema_editor(): raise SomeError except SomeError: self.assertFalse(connection.in_atomic_block) @skipIfDBFeature('can_rollback_ddl') def test_unsupported_transactional_ddl_disallowed(self): message = ( "Executing DDL statements while in a transaction on databases " "that can't perform a rollback is prohibited." ) with atomic(), connection.schema_editor() as editor: with self.assertRaisesMessage(TransactionManagementError, message): editor.execute(editor.sql_create_table % {'table': 'foo', 'definition': ''}) @skipUnlessDBFeature('supports_foreign_keys') def test_foreign_key_index_long_names_regression(self): """ Regression test for #21497. Only affects databases that supports foreign keys. """ # Create the table with connection.schema_editor() as editor: editor.create_model(AuthorWithEvenLongerName) editor.create_model(BookWithLongName) # Find the properly shortened column name column_name = connection.ops.quote_name("author_foreign_key_with_really_long_field_name_id") column_name = column_name[1:-1].lower() # unquote, and, for Oracle, un-upcase # Ensure the table is there and has an index on the column self.assertIn( column_name, self.get_indexes(BookWithLongName._meta.db_table), ) @skipUnlessDBFeature('supports_foreign_keys') def test_add_foreign_key_long_names(self): """ Regression test for #23009. Only affects databases that supports foreign keys. 
""" # Create the initial tables with connection.schema_editor() as editor: editor.create_model(AuthorWithEvenLongerName) editor.create_model(BookWithLongName) # Add a second FK, this would fail due to long ref name before the fix new_field = ForeignKey(AuthorWithEvenLongerName, CASCADE, related_name="something") new_field.set_attributes_from_name("author_other_really_long_named_i_mean_so_long_fk") with connection.schema_editor() as editor: editor.add_field(BookWithLongName, new_field) @isolate_apps('schema') @skipUnlessDBFeature('supports_foreign_keys') def test_add_foreign_key_quoted_db_table(self): class Author(Model): class Meta: db_table = '"table_author_double_quoted"' app_label = 'schema' class Book(Model): author = ForeignKey(Author, CASCADE) class Meta: app_label = 'schema' with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(Book) if connection.vendor == 'mysql': self.assertForeignKeyExists(Book, 'author_id', '"table_author_double_quoted"') else: self.assertForeignKeyExists(Book, 'author_id', 'table_author_double_quoted') def test_add_foreign_object(self): with connection.schema_editor() as editor: editor.create_model(BookForeignObj) new_field = ForeignObject(Author, on_delete=CASCADE, from_fields=['author_id'], to_fields=['id']) new_field.set_attributes_from_name('author') with connection.schema_editor() as editor: editor.add_field(BookForeignObj, new_field) def test_creation_deletion_reserved_names(self): """ Tries creating a model's table, and then deleting it when it has a SQL reserved name. """ # Create the table with connection.schema_editor() as editor: try: editor.create_model(Thing) except OperationalError as e: self.fail("Errors when applying initial migration for a model " "with a table named after an SQL reserved word: %s" % e) # The table is there list(Thing.objects.all()) # Clean up that table with connection.schema_editor() as editor: editor.delete_model(Thing) # The table is gone with self.assertRaises(DatabaseError): list(Thing.objects.all()) def test_remove_constraints_capital_letters(self): """ #23065 - Constraint names must be quoted if they contain capital letters. 
""" def get_field(*args, field_class=IntegerField, **kwargs): kwargs['db_column'] = "CamelCase" field = field_class(*args, **kwargs) field.set_attributes_from_name("CamelCase") return field model = Author field = get_field() table = model._meta.db_table column = field.column identifier_converter = connection.introspection.identifier_converter with connection.schema_editor() as editor: editor.create_model(model) editor.add_field(model, field) constraint_name = 'CamelCaseIndex' expected_constraint_name = identifier_converter(constraint_name) editor.execute( editor.sql_create_index % { "table": editor.quote_name(table), "name": editor.quote_name(constraint_name), "using": "", "columns": editor.quote_name(column), "extra": "", "condition": "", } ) self.assertIn(expected_constraint_name, self.get_constraints(model._meta.db_table)) editor.alter_field(model, get_field(db_index=True), field, strict=True) self.assertNotIn(expected_constraint_name, self.get_constraints(model._meta.db_table)) constraint_name = 'CamelCaseUniqConstraint' expected_constraint_name = identifier_converter(constraint_name) editor.execute(editor._create_unique_sql(model, [field.column], constraint_name)) self.assertIn(expected_constraint_name, self.get_constraints(model._meta.db_table)) editor.alter_field(model, get_field(unique=True), field, strict=True) self.assertNotIn(expected_constraint_name, self.get_constraints(model._meta.db_table)) if editor.sql_create_fk: constraint_name = 'CamelCaseFKConstraint' expected_constraint_name = identifier_converter(constraint_name) editor.execute( editor.sql_create_fk % { "table": editor.quote_name(table), "name": editor.quote_name(constraint_name), "column": editor.quote_name(column), "to_table": editor.quote_name(table), "to_column": editor.quote_name(model._meta.auto_field.column), "deferrable": connection.ops.deferrable_sql(), } ) self.assertIn(expected_constraint_name, self.get_constraints(model._meta.db_table)) editor.alter_field(model, get_field(Author, CASCADE, field_class=ForeignKey), field, strict=True) self.assertNotIn(expected_constraint_name, self.get_constraints(model._meta.db_table)) def test_add_field_use_effective_default(self): """ #23987 - effective_default() should be used as the field default when adding a new field. 
""" # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Ensure there's no surname field columns = self.column_classes(Author) self.assertNotIn("surname", columns) # Create a row Author.objects.create(name='Anonymous1') # Add new CharField to ensure default will be used from effective_default new_field = CharField(max_length=15, blank=True) new_field.set_attributes_from_name("surname") with connection.schema_editor() as editor: editor.add_field(Author, new_field) # Ensure field was added with the right default with connection.cursor() as cursor: cursor.execute("SELECT surname FROM schema_author;") item = cursor.fetchall()[0] self.assertEqual(item[0], None if connection.features.interprets_empty_strings_as_nulls else '') def test_add_field_default_dropped(self): # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Ensure there's no surname field columns = self.column_classes(Author) self.assertNotIn("surname", columns) # Create a row Author.objects.create(name='Anonymous1') # Add new CharField with a default new_field = CharField(max_length=15, blank=True, default='surname default') new_field.set_attributes_from_name("surname") with connection.schema_editor() as editor: editor.add_field(Author, new_field) # Ensure field was added with the right default with connection.cursor() as cursor: cursor.execute("SELECT surname FROM schema_author;") item = cursor.fetchall()[0] self.assertEqual(item[0], 'surname default') # And that the default is no longer set in the database. field = next( f for f in connection.introspection.get_table_description(cursor, "schema_author") if f.name == "surname" ) if connection.features.can_introspect_default: self.assertIsNone(field.default) def test_alter_field_default_dropped(self): # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Create a row Author.objects.create(name='Anonymous1') self.assertIsNone(Author.objects.get().height) old_field = Author._meta.get_field('height') # The default from the new field is used in updating existing rows. new_field = IntegerField(blank=True, default=42) new_field.set_attributes_from_name('height') with connection.schema_editor() as editor: editor.alter_field(Author, old_field, new_field, strict=True) self.assertEqual(Author.objects.get().height, 42) # The database default should be removed. with connection.cursor() as cursor: field = next( f for f in connection.introspection.get_table_description(cursor, "schema_author") if f.name == "height" ) if connection.features.can_introspect_default: self.assertIsNone(field.default) @unittest.skipIf(connection.vendor == 'sqlite', 'SQLite naively remakes the table on field alteration.') def test_alter_field_default_doesnt_perfom_queries(self): """ No queries are performed if a field default changes and the field's not changing from null to non-null. 
""" with connection.schema_editor() as editor: editor.create_model(AuthorWithDefaultHeight) old_field = AuthorWithDefaultHeight._meta.get_field('height') new_default = old_field.default * 2 new_field = PositiveIntegerField(null=True, blank=True, default=new_default) new_field.set_attributes_from_name('height') with connection.schema_editor() as editor, self.assertNumQueries(0): editor.alter_field(AuthorWithDefaultHeight, old_field, new_field, strict=True) def test_add_textfield_unhashable_default(self): # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Create a row Author.objects.create(name='Anonymous1') # Create a field that has an unhashable default new_field = TextField(default={}) new_field.set_attributes_from_name("info") with connection.schema_editor() as editor: editor.add_field(Author, new_field) @unittest.skipUnless(connection.vendor == 'postgresql', "PostgreSQL specific") def test_add_indexed_charfield(self): field = CharField(max_length=255, db_index=True) field.set_attributes_from_name('nom_de_plume') with connection.schema_editor() as editor: editor.create_model(Author) editor.add_field(Author, field) # Should create two indexes; one for like operator. self.assertEqual( self.get_constraints_for_column(Author, 'nom_de_plume'), ['schema_author_nom_de_plume_7570a851', 'schema_author_nom_de_plume_7570a851_like'], ) @unittest.skipUnless(connection.vendor == 'postgresql', "PostgreSQL specific") def test_add_unique_charfield(self): field = CharField(max_length=255, unique=True) field.set_attributes_from_name('nom_de_plume') with connection.schema_editor() as editor: editor.create_model(Author) editor.add_field(Author, field) # Should create two indexes; one for like operator. self.assertEqual( self.get_constraints_for_column(Author, 'nom_de_plume'), ['schema_author_nom_de_plume_7570a851_like', 'schema_author_nom_de_plume_key'] ) @unittest.skipUnless(connection.vendor == 'postgresql', "PostgreSQL specific") def test_alter_field_add_index_to_charfield(self): # Create the table and verify no initial indexes. with connection.schema_editor() as editor: editor.create_model(Author) self.assertEqual(self.get_constraints_for_column(Author, 'name'), []) # Alter to add db_index=True and create 2 indexes. old_field = Author._meta.get_field('name') new_field = CharField(max_length=255, db_index=True) new_field.set_attributes_from_name('name') with connection.schema_editor() as editor: editor.alter_field(Author, old_field, new_field, strict=True) self.assertEqual( self.get_constraints_for_column(Author, 'name'), ['schema_author_name_1fbc5617', 'schema_author_name_1fbc5617_like'] ) # Remove db_index=True to drop both indexes. with connection.schema_editor() as editor: editor.alter_field(Author, new_field, old_field, strict=True) self.assertEqual(self.get_constraints_for_column(Author, 'name'), []) @unittest.skipUnless(connection.vendor == 'postgresql', "PostgreSQL specific") def test_alter_field_add_unique_to_charfield(self): # Create the table and verify no initial indexes. with connection.schema_editor() as editor: editor.create_model(Author) self.assertEqual(self.get_constraints_for_column(Author, 'name'), []) # Alter to add unique=True and create 2 indexes. 
old_field = Author._meta.get_field('name') new_field = CharField(max_length=255, unique=True) new_field.set_attributes_from_name('name') with connection.schema_editor() as editor: editor.alter_field(Author, old_field, new_field, strict=True) self.assertEqual( self.get_constraints_for_column(Author, 'name'), ['schema_author_name_1fbc5617_like', 'schema_author_name_1fbc5617_uniq'] ) # Remove unique=True to drop both indexes. with connection.schema_editor() as editor: editor.alter_field(Author, new_field, old_field, strict=True) self.assertEqual(self.get_constraints_for_column(Author, 'name'), []) @unittest.skipUnless(connection.vendor == 'postgresql', "PostgreSQL specific") def test_alter_field_add_index_to_textfield(self): # Create the table and verify no initial indexes. with connection.schema_editor() as editor: editor.create_model(Note) self.assertEqual(self.get_constraints_for_column(Note, 'info'), []) # Alter to add db_index=True and create 2 indexes. old_field = Note._meta.get_field('info') new_field = TextField(db_index=True) new_field.set_attributes_from_name('info') with connection.schema_editor() as editor: editor.alter_field(Note, old_field, new_field, strict=True) self.assertEqual( self.get_constraints_for_column(Note, 'info'), ['schema_note_info_4b0ea695', 'schema_note_info_4b0ea695_like'] ) # Remove db_index=True to drop both indexes. with connection.schema_editor() as editor: editor.alter_field(Note, new_field, old_field, strict=True) self.assertEqual(self.get_constraints_for_column(Note, 'info'), []) @unittest.skipUnless(connection.vendor == 'postgresql', "PostgreSQL specific") def test_alter_field_add_unique_to_charfield_with_db_index(self): # Create the table and verify initial indexes. with connection.schema_editor() as editor: editor.create_model(BookWithoutAuthor) self.assertEqual( self.get_constraints_for_column(BookWithoutAuthor, 'title'), ['schema_book_title_2dfb2dff', 'schema_book_title_2dfb2dff_like'] ) # Alter to add unique=True (should replace the index) old_field = BookWithoutAuthor._meta.get_field('title') new_field = CharField(max_length=100, db_index=True, unique=True) new_field.set_attributes_from_name('title') with connection.schema_editor() as editor: editor.alter_field(BookWithoutAuthor, old_field, new_field, strict=True) self.assertEqual( self.get_constraints_for_column(BookWithoutAuthor, 'title'), ['schema_book_title_2dfb2dff_like', 'schema_book_title_2dfb2dff_uniq'] ) # Alter to remove unique=True (should drop unique index) new_field2 = CharField(max_length=100, db_index=True) new_field2.set_attributes_from_name('title') with connection.schema_editor() as editor: editor.alter_field(BookWithoutAuthor, new_field, new_field2, strict=True) self.assertEqual( self.get_constraints_for_column(BookWithoutAuthor, 'title'), ['schema_book_title_2dfb2dff', 'schema_book_title_2dfb2dff_like'] ) @unittest.skipUnless(connection.vendor == 'postgresql', "PostgreSQL specific") def test_alter_field_remove_unique_and_db_index_from_charfield(self): # Create the table and verify initial indexes. 
with connection.schema_editor() as editor: editor.create_model(BookWithoutAuthor) self.assertEqual( self.get_constraints_for_column(BookWithoutAuthor, 'title'), ['schema_book_title_2dfb2dff', 'schema_book_title_2dfb2dff_like'] ) # Alter to add unique=True (should replace the index) old_field = BookWithoutAuthor._meta.get_field('title') new_field = CharField(max_length=100, db_index=True, unique=True) new_field.set_attributes_from_name('title') with connection.schema_editor() as editor: editor.alter_field(BookWithoutAuthor, old_field, new_field, strict=True) self.assertEqual( self.get_constraints_for_column(BookWithoutAuthor, 'title'), ['schema_book_title_2dfb2dff_like', 'schema_book_title_2dfb2dff_uniq'] ) # Alter to remove both unique=True and db_index=True (should drop all indexes) new_field2 = CharField(max_length=100) new_field2.set_attributes_from_name('title') with connection.schema_editor() as editor: editor.alter_field(BookWithoutAuthor, new_field, new_field2, strict=True) self.assertEqual(self.get_constraints_for_column(BookWithoutAuthor, 'title'), []) @unittest.skipUnless(connection.vendor == 'postgresql', "PostgreSQL specific") def test_alter_field_swap_unique_and_db_index_with_charfield(self): # Create the table and verify initial indexes. with connection.schema_editor() as editor: editor.create_model(BookWithoutAuthor) self.assertEqual( self.get_constraints_for_column(BookWithoutAuthor, 'title'), ['schema_book_title_2dfb2dff', 'schema_book_title_2dfb2dff_like'] ) # Alter to set unique=True and remove db_index=True (should replace the index) old_field = BookWithoutAuthor._meta.get_field('title') new_field = CharField(max_length=100, unique=True) new_field.set_attributes_from_name('title') with connection.schema_editor() as editor: editor.alter_field(BookWithoutAuthor, old_field, new_field, strict=True) self.assertEqual( self.get_constraints_for_column(BookWithoutAuthor, 'title'), ['schema_book_title_2dfb2dff_like', 'schema_book_title_2dfb2dff_uniq'] ) # Alter to set db_index=True and remove unique=True (should restore index) new_field2 = CharField(max_length=100, db_index=True) new_field2.set_attributes_from_name('title') with connection.schema_editor() as editor: editor.alter_field(BookWithoutAuthor, new_field, new_field2, strict=True) self.assertEqual( self.get_constraints_for_column(BookWithoutAuthor, 'title'), ['schema_book_title_2dfb2dff', 'schema_book_title_2dfb2dff_like'] ) @unittest.skipUnless(connection.vendor == 'postgresql', "PostgreSQL specific") def test_alter_field_add_db_index_to_charfield_with_unique(self): # Create the table and verify initial indexes. 
        with connection.schema_editor() as editor:
            editor.create_model(Tag)
        self.assertEqual(
            self.get_constraints_for_column(Tag, 'slug'),
            ['schema_tag_slug_2c418ba3_like', 'schema_tag_slug_key']
        )
        # Alter to add db_index=True
        old_field = Tag._meta.get_field('slug')
        new_field = SlugField(db_index=True, unique=True)
        new_field.set_attributes_from_name('slug')
        with connection.schema_editor() as editor:
            editor.alter_field(Tag, old_field, new_field, strict=True)
        self.assertEqual(
            self.get_constraints_for_column(Tag, 'slug'),
            ['schema_tag_slug_2c418ba3_like', 'schema_tag_slug_key']
        )
        # Alter to remove db_index=True
        new_field2 = SlugField(unique=True)
        new_field2.set_attributes_from_name('slug')
        with connection.schema_editor() as editor:
            editor.alter_field(Tag, new_field, new_field2, strict=True)
        self.assertEqual(
            self.get_constraints_for_column(Tag, 'slug'),
            ['schema_tag_slug_2c418ba3_like', 'schema_tag_slug_key']
        )

    def test_alter_field_add_index_to_integerfield(self):
        # Create the table and verify no initial indexes.
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        self.assertEqual(self.get_constraints_for_column(Author, 'weight'), [])
        # Alter to add db_index=True and create index.
        old_field = Author._meta.get_field('weight')
        new_field = IntegerField(null=True, db_index=True)
        new_field.set_attributes_from_name('weight')
        with connection.schema_editor() as editor:
            editor.alter_field(Author, old_field, new_field, strict=True)
        self.assertEqual(self.get_constraints_for_column(Author, 'weight'), ['schema_author_weight_587740f9'])
        # Remove db_index=True to drop index.
        with connection.schema_editor() as editor:
            editor.alter_field(Author, new_field, old_field, strict=True)
        self.assertEqual(self.get_constraints_for_column(Author, 'weight'), [])

    def test_alter_pk_with_self_referential_field(self):
        """
        Changing the primary key field name of a model with a self-referential
        foreign key (#26384).
        """
        with connection.schema_editor() as editor:
            editor.create_model(Node)
        old_field = Node._meta.get_field('node_id')
        new_field = AutoField(primary_key=True)
        new_field.set_attributes_from_name('id')
        with connection.schema_editor() as editor:
            editor.alter_field(Node, old_field, new_field, strict=True)
        self.assertForeignKeyExists(Node, 'parent_id', Node._meta.db_table)

    @mock.patch('django.db.backends.base.schema.datetime')
    @mock.patch('django.db.backends.base.schema.timezone')
    # mock.patch() decorators apply bottom-up, so the bottom (timezone) mock
    # is passed to the test first and the datetime mock second.
    def test_add_datefield_and_datetimefield_use_effective_default(self, mocked_tz, mocked_datetime):
        """
        effective_default() should be used for DateField, DateTimeField, and
        TimeField if auto_now or auto_now_add is set (#25005).
""" now = datetime.datetime(month=1, day=1, year=2000, hour=1, minute=1) now_tz = datetime.datetime(month=1, day=1, year=2000, hour=1, minute=1, tzinfo=timezone.utc) mocked_datetime.now = mock.MagicMock(return_value=now) mocked_tz.now = mock.MagicMock(return_value=now_tz) # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Check auto_now/auto_now_add attributes are not defined columns = self.column_classes(Author) self.assertNotIn("dob_auto_now", columns) self.assertNotIn("dob_auto_now_add", columns) self.assertNotIn("dtob_auto_now", columns) self.assertNotIn("dtob_auto_now_add", columns) self.assertNotIn("tob_auto_now", columns) self.assertNotIn("tob_auto_now_add", columns) # Create a row Author.objects.create(name='Anonymous1') # Ensure fields were added with the correct defaults dob_auto_now = DateField(auto_now=True) dob_auto_now.set_attributes_from_name('dob_auto_now') self.check_added_field_default( editor, Author, dob_auto_now, 'dob_auto_now', now.date(), cast_function=lambda x: x.date(), ) dob_auto_now_add = DateField(auto_now_add=True) dob_auto_now_add.set_attributes_from_name('dob_auto_now_add') self.check_added_field_default( editor, Author, dob_auto_now_add, 'dob_auto_now_add', now.date(), cast_function=lambda x: x.date(), ) dtob_auto_now = DateTimeField(auto_now=True) dtob_auto_now.set_attributes_from_name('dtob_auto_now') self.check_added_field_default( editor, Author, dtob_auto_now, 'dtob_auto_now', now, ) dt_tm_of_birth_auto_now_add = DateTimeField(auto_now_add=True) dt_tm_of_birth_auto_now_add.set_attributes_from_name('dtob_auto_now_add') self.check_added_field_default( editor, Author, dt_tm_of_birth_auto_now_add, 'dtob_auto_now_add', now, ) tob_auto_now = TimeField(auto_now=True) tob_auto_now.set_attributes_from_name('tob_auto_now') self.check_added_field_default( editor, Author, tob_auto_now, 'tob_auto_now', now.time(), cast_function=lambda x: x.time(), ) tob_auto_now_add = TimeField(auto_now_add=True) tob_auto_now_add.set_attributes_from_name('tob_auto_now_add') self.check_added_field_default( editor, Author, tob_auto_now_add, 'tob_auto_now_add', now.time(), cast_function=lambda x: x.time(), ) def test_namespaced_db_table_create_index_name(self): """ Table names are stripped of their namespace/schema before being used to generate index names. 
""" with connection.schema_editor() as editor: max_name_length = connection.ops.max_name_length() or 200 namespace = 'n' * max_name_length table_name = 't' * max_name_length namespaced_table_name = '"%s"."%s"' % (namespace, table_name) self.assertEqual( editor._create_index_name(table_name, []), editor._create_index_name(namespaced_table_name, []), ) @unittest.skipUnless(connection.vendor == 'oracle', 'Oracle specific db_table syntax') def test_creation_with_db_table_double_quotes(self): oracle_user = connection.creation._test_database_user() class Student(Model): name = CharField(max_length=30) class Meta: app_label = 'schema' apps = new_apps db_table = '"%s"."DJANGO_STUDENT_TABLE"' % oracle_user class Document(Model): name = CharField(max_length=30) students = ManyToManyField(Student) class Meta: app_label = 'schema' apps = new_apps db_table = '"%s"."DJANGO_DOCUMENT_TABLE"' % oracle_user self.local_models = [Student, Document] with connection.schema_editor() as editor: editor.create_model(Student) editor.create_model(Document) doc = Document.objects.create(name='Test Name') student = Student.objects.create(name='Some man') doc.students.add(student) def test_rename_table_renames_deferred_sql_references(self): atomic_rename = connection.features.supports_atomic_references_rename with connection.schema_editor(atomic=atomic_rename) as editor: editor.create_model(Author) editor.create_model(Book) editor.alter_db_table(Author, 'schema_author', 'schema_renamed_author') editor.alter_db_table(Author, 'schema_book', 'schema_renamed_book') self.assertGreater(len(editor.deferred_sql), 0) for statement in editor.deferred_sql: self.assertIs(statement.references_table('schema_author'), False) self.assertIs(statement.references_table('schema_book'), False) @unittest.skipIf(connection.vendor == 'sqlite', 'SQLite naively remakes the table on field alteration.') def test_rename_column_renames_deferred_sql_references(self): with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(Book) old_title = Book._meta.get_field('title') new_title = CharField(max_length=100, db_index=True) new_title.set_attributes_from_name('renamed_title') editor.alter_field(Book, old_title, new_title) old_author = Book._meta.get_field('author') new_author = ForeignKey(Author, CASCADE) new_author.set_attributes_from_name('renamed_author') editor.alter_field(Book, old_author, new_author) self.assertGreater(len(editor.deferred_sql), 0) for statement in editor.deferred_sql: self.assertIs(statement.references_column('book', 'title'), False) self.assertIs(statement.references_column('book', 'author_id'), False) @isolate_apps('schema') def test_referenced_field_without_constraint_rename_inside_atomic_block(self): """ Foreign keys without database level constraint don't prevent the field they reference from being renamed in an atomic block. 
""" class Foo(Model): field = CharField(max_length=255, unique=True) class Meta: app_label = 'schema' class Bar(Model): foo = ForeignKey(Foo, CASCADE, to_field='field', db_constraint=False) class Meta: app_label = 'schema' self.isolated_local_models = [Foo, Bar] with connection.schema_editor() as editor: editor.create_model(Foo) editor.create_model(Bar) new_field = CharField(max_length=255, unique=True) new_field.set_attributes_from_name('renamed') with connection.schema_editor(atomic=True) as editor: editor.alter_field(Foo, Foo._meta.get_field('field'), new_field) @isolate_apps('schema') def test_referenced_table_without_constraint_rename_inside_atomic_block(self): """ Foreign keys without database level constraint don't prevent the table they reference from being renamed in an atomic block. """ class Foo(Model): field = CharField(max_length=255, unique=True) class Meta: app_label = 'schema' class Bar(Model): foo = ForeignKey(Foo, CASCADE, to_field='field', db_constraint=False) class Meta: app_label = 'schema' self.isolated_local_models = [Foo, Bar] with connection.schema_editor() as editor: editor.create_model(Foo) editor.create_model(Bar) new_field = CharField(max_length=255, unique=True) new_field.set_attributes_from_name('renamed') with connection.schema_editor(atomic=True) as editor: editor.alter_db_table(Foo, Foo._meta.db_table, 'renamed_table') Foo._meta.db_table = 'renamed_table'
from django.conf import settings from django.core.checks.security import base, csrf, sessions from django.test import SimpleTestCase from django.test.utils import override_settings class CheckSessionCookieSecureTest(SimpleTestCase): @property def func(self): from django.core.checks.security.sessions import check_session_cookie_secure return check_session_cookie_secure @override_settings( SESSION_COOKIE_SECURE=False, INSTALLED_APPS=["django.contrib.sessions"], MIDDLEWARE=[], ) def test_session_cookie_secure_with_installed_app(self): """ Warn if SESSION_COOKIE_SECURE is off and "django.contrib.sessions" is in INSTALLED_APPS. """ self.assertEqual(self.func(None), [sessions.W010]) @override_settings( SESSION_COOKIE_SECURE=False, INSTALLED_APPS=[], MIDDLEWARE=['django.contrib.sessions.middleware.SessionMiddleware'], ) def test_session_cookie_secure_with_middleware(self): """ Warn if SESSION_COOKIE_SECURE is off and "django.contrib.sessions.middleware.SessionMiddleware" is in MIDDLEWARE. """ self.assertEqual(self.func(None), [sessions.W011]) @override_settings( SESSION_COOKIE_SECURE=False, INSTALLED_APPS=["django.contrib.sessions"], MIDDLEWARE=['django.contrib.sessions.middleware.SessionMiddleware'], ) def test_session_cookie_secure_both(self): """ If SESSION_COOKIE_SECURE is off and we find both the session app and the middleware, provide one common warning. """ self.assertEqual(self.func(None), [sessions.W012]) @override_settings( SESSION_COOKIE_SECURE=True, INSTALLED_APPS=["django.contrib.sessions"], MIDDLEWARE=['django.contrib.sessions.middleware.SessionMiddleware'], ) def test_session_cookie_secure_true(self): """ If SESSION_COOKIE_SECURE is on, there's no warning about it. """ self.assertEqual(self.func(None), []) class CheckSessionCookieHttpOnlyTest(SimpleTestCase): @property def func(self): from django.core.checks.security.sessions import check_session_cookie_httponly return check_session_cookie_httponly @override_settings( SESSION_COOKIE_HTTPONLY=False, INSTALLED_APPS=["django.contrib.sessions"], MIDDLEWARE=[], ) def test_session_cookie_httponly_with_installed_app(self): """ Warn if SESSION_COOKIE_HTTPONLY is off and "django.contrib.sessions" is in INSTALLED_APPS. """ self.assertEqual(self.func(None), [sessions.W013]) @override_settings( SESSION_COOKIE_HTTPONLY=False, INSTALLED_APPS=[], MIDDLEWARE=['django.contrib.sessions.middleware.SessionMiddleware'], ) def test_session_cookie_httponly_with_middleware(self): """ Warn if SESSION_COOKIE_HTTPONLY is off and "django.contrib.sessions.middleware.SessionMiddleware" is in MIDDLEWARE. """ self.assertEqual(self.func(None), [sessions.W014]) @override_settings( SESSION_COOKIE_HTTPONLY=False, INSTALLED_APPS=["django.contrib.sessions"], MIDDLEWARE=['django.contrib.sessions.middleware.SessionMiddleware'], ) def test_session_cookie_httponly_both(self): """ If SESSION_COOKIE_HTTPONLY is off and we find both the session app and the middleware, provide one common warning. """ self.assertEqual(self.func(None), [sessions.W015]) @override_settings( SESSION_COOKIE_HTTPONLY=True, INSTALLED_APPS=["django.contrib.sessions"], MIDDLEWARE=['django.contrib.sessions.middleware.SessionMiddleware'], ) def test_session_cookie_httponly_true(self): """ If SESSION_COOKIE_HTTPONLY is on, there's no warning about it. 
""" self.assertEqual(self.func(None), []) class CheckCSRFMiddlewareTest(SimpleTestCase): @property def func(self): from django.core.checks.security.csrf import check_csrf_middleware return check_csrf_middleware @override_settings(MIDDLEWARE=[]) def test_no_csrf_middleware(self): """ Warn if CsrfViewMiddleware isn't in MIDDLEWARE. """ self.assertEqual(self.func(None), [csrf.W003]) @override_settings(MIDDLEWARE=['django.middleware.csrf.CsrfViewMiddleware']) def test_with_csrf_middleware(self): self.assertEqual(self.func(None), []) class CheckCSRFCookieSecureTest(SimpleTestCase): @property def func(self): from django.core.checks.security.csrf import check_csrf_cookie_secure return check_csrf_cookie_secure @override_settings( MIDDLEWARE=["django.middleware.csrf.CsrfViewMiddleware"], CSRF_COOKIE_SECURE=False, ) def test_with_csrf_cookie_secure_false(self): """ Warn if CsrfViewMiddleware is in MIDDLEWARE but CSRF_COOKIE_SECURE isn't True. """ self.assertEqual(self.func(None), [csrf.W016]) @override_settings( MIDDLEWARE=["django.middleware.csrf.CsrfViewMiddleware"], CSRF_USE_SESSIONS=True, CSRF_COOKIE_SECURE=False, ) def test_use_sessions_with_csrf_cookie_secure_false(self): """ No warning if CSRF_COOKIE_SECURE isn't True while CSRF_USE_SESSIONS is True. """ self.assertEqual(self.func(None), []) @override_settings(MIDDLEWARE=[], CSRF_COOKIE_SECURE=False) def test_with_csrf_cookie_secure_false_no_middleware(self): """ No warning if CsrfViewMiddleware isn't in MIDDLEWARE, even if CSRF_COOKIE_SECURE is False. """ self.assertEqual(self.func(None), []) @override_settings( MIDDLEWARE=["django.middleware.csrf.CsrfViewMiddleware"], CSRF_COOKIE_SECURE=True, ) def test_with_csrf_cookie_secure_true(self): self.assertEqual(self.func(None), []) class CheckSecurityMiddlewareTest(SimpleTestCase): @property def func(self): from django.core.checks.security.base import check_security_middleware return check_security_middleware @override_settings(MIDDLEWARE=[]) def test_no_security_middleware(self): """ Warn if SecurityMiddleware isn't in MIDDLEWARE. """ self.assertEqual(self.func(None), [base.W001]) @override_settings(MIDDLEWARE=['django.middleware.security.SecurityMiddleware']) def test_with_security_middleware(self): self.assertEqual(self.func(None), []) class CheckStrictTransportSecurityTest(SimpleTestCase): @property def func(self): from django.core.checks.security.base import check_sts return check_sts @override_settings( MIDDLEWARE=["django.middleware.security.SecurityMiddleware"], SECURE_HSTS_SECONDS=0, ) def test_no_sts(self): """ Warn if SECURE_HSTS_SECONDS isn't > 0. """ self.assertEqual(self.func(None), [base.W004]) @override_settings(MIDDLEWARE=[], SECURE_HSTS_SECONDS=0) def test_no_sts_no_middleware(self): """ Don't warn if SECURE_HSTS_SECONDS isn't > 0 and SecurityMiddleware isn't installed. """ self.assertEqual(self.func(None), []) @override_settings( MIDDLEWARE=["django.middleware.security.SecurityMiddleware"], SECURE_HSTS_SECONDS=3600, ) def test_with_sts(self): self.assertEqual(self.func(None), []) class CheckStrictTransportSecuritySubdomainsTest(SimpleTestCase): @property def func(self): from django.core.checks.security.base import check_sts_include_subdomains return check_sts_include_subdomains @override_settings( MIDDLEWARE=["django.middleware.security.SecurityMiddleware"], SECURE_HSTS_INCLUDE_SUBDOMAINS=False, SECURE_HSTS_SECONDS=3600, ) def test_no_sts_subdomains(self): """ Warn if SECURE_HSTS_INCLUDE_SUBDOMAINS isn't True. 
""" self.assertEqual(self.func(None), [base.W005]) @override_settings( MIDDLEWARE=[], SECURE_HSTS_INCLUDE_SUBDOMAINS=False, SECURE_HSTS_SECONDS=3600, ) def test_no_sts_subdomains_no_middleware(self): """ Don't warn if SecurityMiddleware isn't installed. """ self.assertEqual(self.func(None), []) @override_settings( MIDDLEWARE=["django.middleware.security.SecurityMiddleware"], SECURE_SSL_REDIRECT=False, SECURE_HSTS_SECONDS=None, ) def test_no_sts_subdomains_no_seconds(self): """ Don't warn if SECURE_HSTS_SECONDS isn't set. """ self.assertEqual(self.func(None), []) @override_settings( MIDDLEWARE=["django.middleware.security.SecurityMiddleware"], SECURE_HSTS_INCLUDE_SUBDOMAINS=True, SECURE_HSTS_SECONDS=3600, ) def test_with_sts_subdomains(self): self.assertEqual(self.func(None), []) class CheckStrictTransportSecurityPreloadTest(SimpleTestCase): @property def func(self): from django.core.checks.security.base import check_sts_preload return check_sts_preload @override_settings( MIDDLEWARE=["django.middleware.security.SecurityMiddleware"], SECURE_HSTS_PRELOAD=False, SECURE_HSTS_SECONDS=3600, ) def test_no_sts_preload(self): """ Warn if SECURE_HSTS_PRELOAD isn't True. """ self.assertEqual(self.func(None), [base.W021]) @override_settings(MIDDLEWARE=[], SECURE_HSTS_PRELOAD=False, SECURE_HSTS_SECONDS=3600) def test_no_sts_preload_no_middleware(self): """ Don't warn if SecurityMiddleware isn't installed. """ self.assertEqual(self.func(None), []) @override_settings( MIDDLEWARE=["django.middleware.security.SecurityMiddleware"], SECURE_SSL_REDIRECT=False, SECURE_HSTS_SECONDS=None, ) def test_no_sts_preload_no_seconds(self): """ Don't warn if SECURE_HSTS_SECONDS isn't set. """ self.assertEqual(self.func(None), []) @override_settings( MIDDLEWARE=["django.middleware.security.SecurityMiddleware"], SECURE_HSTS_PRELOAD=True, SECURE_HSTS_SECONDS=3600, ) def test_with_sts_preload(self): self.assertEqual(self.func(None), []) class CheckXFrameOptionsMiddlewareTest(SimpleTestCase): @property def func(self): from django.core.checks.security.base import check_xframe_options_middleware return check_xframe_options_middleware @override_settings(MIDDLEWARE=[]) def test_middleware_not_installed(self): """ Warn if XFrameOptionsMiddleware isn't in MIDDLEWARE. """ self.assertEqual(self.func(None), [base.W002]) @override_settings(MIDDLEWARE=["django.middleware.clickjacking.XFrameOptionsMiddleware"]) def test_middleware_installed(self): self.assertEqual(self.func(None), []) class CheckXFrameOptionsDenyTest(SimpleTestCase): @property def func(self): from django.core.checks.security.base import check_xframe_deny return check_xframe_deny @override_settings( MIDDLEWARE=["django.middleware.clickjacking.XFrameOptionsMiddleware"], X_FRAME_OPTIONS='SAMEORIGIN', ) def test_x_frame_options_not_deny(self): """ Warn if XFrameOptionsMiddleware is in MIDDLEWARE but X_FRAME_OPTIONS isn't 'DENY'. """ self.assertEqual(self.func(None), [base.W019]) @override_settings(MIDDLEWARE=[], X_FRAME_OPTIONS='SAMEORIGIN') def test_middleware_not_installed(self): """ No error if XFrameOptionsMiddleware isn't in MIDDLEWARE even if X_FRAME_OPTIONS isn't 'DENY'. 
""" self.assertEqual(self.func(None), []) @override_settings( MIDDLEWARE=["django.middleware.clickjacking.XFrameOptionsMiddleware"], X_FRAME_OPTIONS='DENY', ) def test_xframe_deny(self): self.assertEqual(self.func(None), []) class CheckContentTypeNosniffTest(SimpleTestCase): @property def func(self): from django.core.checks.security.base import check_content_type_nosniff return check_content_type_nosniff @override_settings( MIDDLEWARE=["django.middleware.security.SecurityMiddleware"], SECURE_CONTENT_TYPE_NOSNIFF=False, ) def test_no_content_type_nosniff(self): """ Warn if SECURE_CONTENT_TYPE_NOSNIFF isn't True. """ self.assertEqual(self.func(None), [base.W006]) @override_settings(MIDDLEWARE=[], SECURE_CONTENT_TYPE_NOSNIFF=False) def test_no_content_type_nosniff_no_middleware(self): """ Don't warn if SECURE_CONTENT_TYPE_NOSNIFF isn't True and SecurityMiddleware isn't in MIDDLEWARE. """ self.assertEqual(self.func(None), []) @override_settings( MIDDLEWARE=["django.middleware.security.SecurityMiddleware"], SECURE_CONTENT_TYPE_NOSNIFF=True, ) def test_with_content_type_nosniff(self): self.assertEqual(self.func(None), []) class CheckSSLRedirectTest(SimpleTestCase): @property def func(self): from django.core.checks.security.base import check_ssl_redirect return check_ssl_redirect @override_settings( MIDDLEWARE=["django.middleware.security.SecurityMiddleware"], SECURE_SSL_REDIRECT=False, ) def test_no_ssl_redirect(self): """ Warn if SECURE_SSL_REDIRECT isn't True. """ self.assertEqual(self.func(None), [base.W008]) @override_settings(MIDDLEWARE=[], SECURE_SSL_REDIRECT=False) def test_no_ssl_redirect_no_middleware(self): """ Don't warn if SECURE_SSL_REDIRECT is False and SecurityMiddleware isn't installed. """ self.assertEqual(self.func(None), []) @override_settings( MIDDLEWARE=["django.middleware.security.SecurityMiddleware"], SECURE_SSL_REDIRECT=True, ) def test_with_ssl_redirect(self): self.assertEqual(self.func(None), []) class CheckSecretKeyTest(SimpleTestCase): @property def func(self): from django.core.checks.security.base import check_secret_key return check_secret_key @override_settings(SECRET_KEY=('abcdefghijklmnopqrstuvwx' * 2) + 'ab') def test_okay_secret_key(self): self.assertEqual(len(settings.SECRET_KEY), base.SECRET_KEY_MIN_LENGTH) self.assertGreater(len(set(settings.SECRET_KEY)), base.SECRET_KEY_MIN_UNIQUE_CHARACTERS) self.assertEqual(self.func(None), []) @override_settings(SECRET_KEY='') def test_empty_secret_key(self): self.assertEqual(self.func(None), [base.W009]) @override_settings(SECRET_KEY=None) def test_missing_secret_key(self): del settings.SECRET_KEY self.assertEqual(self.func(None), [base.W009]) @override_settings(SECRET_KEY=None) def test_none_secret_key(self): self.assertEqual(self.func(None), [base.W009]) @override_settings(SECRET_KEY=('abcdefghijklmnopqrstuvwx' * 2) + 'a') def test_low_length_secret_key(self): self.assertEqual(len(settings.SECRET_KEY), base.SECRET_KEY_MIN_LENGTH - 1) self.assertEqual(self.func(None), [base.W009]) @override_settings(SECRET_KEY='abcd' * 20) def test_low_entropy_secret_key(self): self.assertGreater(len(settings.SECRET_KEY), base.SECRET_KEY_MIN_LENGTH) self.assertLess(len(set(settings.SECRET_KEY)), base.SECRET_KEY_MIN_UNIQUE_CHARACTERS) self.assertEqual(self.func(None), [base.W009]) class CheckDebugTest(SimpleTestCase): @property def func(self): from django.core.checks.security.base import check_debug return check_debug @override_settings(DEBUG=True) def test_debug_true(self): """ Warn if DEBUG is True. 
""" self.assertEqual(self.func(None), [base.W018]) @override_settings(DEBUG=False) def test_debug_false(self): self.assertEqual(self.func(None), []) class CheckAllowedHostsTest(SimpleTestCase): @property def func(self): from django.core.checks.security.base import check_allowed_hosts return check_allowed_hosts @override_settings(ALLOWED_HOSTS=[]) def test_allowed_hosts_empty(self): self.assertEqual(self.func(None), [base.W020]) @override_settings(ALLOWED_HOSTS=['.example.com']) def test_allowed_hosts_set(self): self.assertEqual(self.func(None), [])
from datetime import datetime from operator import attrgetter from django.core.exceptions import FieldError from django.db.models import ( CharField, Count, DateTimeField, F, Max, OuterRef, Subquery, Value, ) from django.db.models.functions import Upper from django.test import TestCase from django.utils.deprecation import RemovedInDjango31Warning from .models import Article, Author, ChildArticle, OrderedByFArticle, Reference class OrderingTests(TestCase): @classmethod def setUpTestData(cls): cls.a1 = Article.objects.create(headline="Article 1", pub_date=datetime(2005, 7, 26)) cls.a2 = Article.objects.create(headline="Article 2", pub_date=datetime(2005, 7, 27)) cls.a3 = Article.objects.create(headline="Article 3", pub_date=datetime(2005, 7, 27)) cls.a4 = Article.objects.create(headline="Article 4", pub_date=datetime(2005, 7, 28)) cls.author_1 = Author.objects.create(name="Name 1") cls.author_2 = Author.objects.create(name="Name 2") for i in range(2): Author.objects.create() def test_default_ordering(self): """ By default, Article.objects.all() orders by pub_date descending, then headline ascending. """ self.assertQuerysetEqual( Article.objects.all(), [ "Article 4", "Article 2", "Article 3", "Article 1", ], attrgetter("headline") ) # Getting a single item should work too: self.assertEqual(Article.objects.all()[0], self.a4) def test_default_ordering_override(self): """ Override ordering with order_by, which is in the same format as the ordering attribute in models. """ self.assertQuerysetEqual( Article.objects.order_by("headline"), [ "Article 1", "Article 2", "Article 3", "Article 4", ], attrgetter("headline") ) self.assertQuerysetEqual( Article.objects.order_by("pub_date", "-headline"), [ "Article 1", "Article 3", "Article 2", "Article 4", ], attrgetter("headline") ) def test_order_by_override(self): """ Only the last order_by has any effect (since they each override any previous ordering). """ self.assertQuerysetEqual( Article.objects.order_by("id"), [ "Article 1", "Article 2", "Article 3", "Article 4", ], attrgetter("headline") ) self.assertQuerysetEqual( Article.objects.order_by("id").order_by("-headline"), [ "Article 4", "Article 3", "Article 2", "Article 1", ], attrgetter("headline") ) def test_order_by_nulls_first_and_last(self): msg = "nulls_first and nulls_last are mutually exclusive" with self.assertRaisesMessage(ValueError, msg): Article.objects.order_by(F("author").desc(nulls_last=True, nulls_first=True)) def assertQuerysetEqualReversible(self, queryset, sequence): self.assertSequenceEqual(queryset, sequence) self.assertSequenceEqual(queryset.reverse(), list(reversed(sequence))) def test_order_by_nulls_last(self): Article.objects.filter(headline="Article 3").update(author=self.author_1) Article.objects.filter(headline="Article 4").update(author=self.author_2) # asc and desc are chainable with nulls_last. 
self.assertQuerysetEqualReversible( Article.objects.order_by(F("author").desc(nulls_last=True), 'headline'), [self.a4, self.a3, self.a1, self.a2], ) self.assertQuerysetEqualReversible( Article.objects.order_by(F("author").asc(nulls_last=True), 'headline'), [self.a3, self.a4, self.a1, self.a2], ) self.assertQuerysetEqualReversible( Article.objects.order_by(Upper("author__name").desc(nulls_last=True), 'headline'), [self.a4, self.a3, self.a1, self.a2], ) self.assertQuerysetEqualReversible( Article.objects.order_by(Upper("author__name").asc(nulls_last=True), 'headline'), [self.a3, self.a4, self.a1, self.a2], ) def test_order_by_nulls_first(self): Article.objects.filter(headline="Article 3").update(author=self.author_1) Article.objects.filter(headline="Article 4").update(author=self.author_2) # asc and desc are chainable with nulls_first. self.assertQuerysetEqualReversible( Article.objects.order_by(F("author").asc(nulls_first=True), 'headline'), [self.a1, self.a2, self.a3, self.a4], ) self.assertQuerysetEqualReversible( Article.objects.order_by(F("author").desc(nulls_first=True), 'headline'), [self.a1, self.a2, self.a4, self.a3], ) self.assertQuerysetEqualReversible( Article.objects.order_by(Upper("author__name").asc(nulls_first=True), 'headline'), [self.a1, self.a2, self.a3, self.a4], ) self.assertQuerysetEqualReversible( Article.objects.order_by(Upper("author__name").desc(nulls_first=True), 'headline'), [self.a1, self.a2, self.a4, self.a3], ) def test_orders_nulls_first_on_filtered_subquery(self): Article.objects.filter(headline='Article 1').update(author=self.author_1) Article.objects.filter(headline='Article 2').update(author=self.author_1) Article.objects.filter(headline='Article 4').update(author=self.author_2) Author.objects.filter(name__isnull=True).delete() author_3 = Author.objects.create(name='Name 3') article_subquery = Article.objects.filter( author=OuterRef('pk'), headline__icontains='Article', ).order_by().values('author').annotate( last_date=Max('pub_date'), ).values('last_date') self.assertQuerysetEqualReversible( Author.objects.annotate( last_date=Subquery(article_subquery, output_field=DateTimeField()) ).order_by( F('last_date').asc(nulls_first=True) ).distinct(), [author_3, self.author_1, self.author_2], ) def test_stop_slicing(self): """ Use the 'stop' part of slicing notation to limit the results. """ self.assertQuerysetEqual( Article.objects.order_by("headline")[:2], [ "Article 1", "Article 2", ], attrgetter("headline") ) def test_stop_start_slicing(self): """ Use the 'stop' and 'start' parts of slicing notation to offset the result list. """ self.assertQuerysetEqual( Article.objects.order_by("headline")[1:3], [ "Article 2", "Article 3", ], attrgetter("headline") ) def test_random_ordering(self): """ Use '?' to order randomly. """ self.assertEqual( len(list(Article.objects.order_by("?"))), 4 ) def test_reversed_ordering(self): """ Ordering can be reversed using the reverse() method on a queryset. This allows you to extract things like "the last two items" (reverse and then take the first two). 
""" self.assertQuerysetEqual( Article.objects.all().reverse()[:2], [ "Article 1", "Article 3", ], attrgetter("headline") ) def test_reverse_ordering_pure(self): qs1 = Article.objects.order_by(F('headline').asc()) qs2 = qs1.reverse() self.assertQuerysetEqual( qs2, [ 'Article 4', 'Article 3', 'Article 2', 'Article 1', ], attrgetter('headline'), ) self.assertQuerysetEqual( qs1, [ "Article 1", "Article 2", "Article 3", "Article 4", ], attrgetter("headline") ) def test_reverse_meta_ordering_pure(self): Article.objects.create( headline='Article 5', pub_date=datetime(2005, 7, 30), author=self.author_1, second_author=self.author_2, ) Article.objects.create( headline='Article 5', pub_date=datetime(2005, 7, 30), author=self.author_2, second_author=self.author_1, ) self.assertQuerysetEqual( Article.objects.filter(headline='Article 5').reverse(), ['Name 2', 'Name 1'], attrgetter('author.name'), ) self.assertQuerysetEqual( Article.objects.filter(headline='Article 5'), ['Name 1', 'Name 2'], attrgetter('author.name'), ) def test_no_reordering_after_slicing(self): msg = 'Cannot reverse a query once a slice has been taken.' qs = Article.objects.all()[0:2] with self.assertRaisesMessage(TypeError, msg): qs.reverse() with self.assertRaisesMessage(TypeError, msg): qs.last() def test_extra_ordering(self): """ Ordering can be based on fields included from an 'extra' clause """ self.assertQuerysetEqual( Article.objects.extra(select={"foo": "pub_date"}, order_by=["foo", "headline"]), [ "Article 1", "Article 2", "Article 3", "Article 4", ], attrgetter("headline") ) def test_extra_ordering_quoting(self): """ If the extra clause uses an SQL keyword for a name, it will be protected by quoting. """ self.assertQuerysetEqual( Article.objects.extra(select={"order": "pub_date"}, order_by=["order", "headline"]), [ "Article 1", "Article 2", "Article 3", "Article 4", ], attrgetter("headline") ) def test_extra_ordering_with_table_name(self): self.assertQuerysetEqual( Article.objects.extra(order_by=['ordering_article.headline']), [ "Article 1", "Article 2", "Article 3", "Article 4", ], attrgetter("headline") ) self.assertQuerysetEqual( Article.objects.extra(order_by=['-ordering_article.headline']), [ "Article 4", "Article 3", "Article 2", "Article 1", ], attrgetter("headline") ) def test_order_by_pk(self): """ 'pk' works as an ordering option in Meta. """ self.assertQuerysetEqual( Author.objects.all(), list(reversed(range(1, Author.objects.count() + 1))), attrgetter("pk"), ) def test_order_by_fk_attname(self): """ ordering by a foreign key by its attribute name prevents the query from inheriting its related model ordering option (#19195). 
""" for i in range(1, 5): author = Author.objects.get(pk=i) article = getattr(self, "a%d" % (5 - i)) article.author = author article.save(update_fields={'author'}) self.assertQuerysetEqual( Article.objects.order_by('author_id'), [ "Article 4", "Article 3", "Article 2", "Article 1", ], attrgetter("headline") ) def test_order_by_f_expression(self): self.assertQuerysetEqual( Article.objects.order_by(F('headline')), [ "Article 1", "Article 2", "Article 3", "Article 4", ], attrgetter("headline") ) self.assertQuerysetEqual( Article.objects.order_by(F('headline').asc()), [ "Article 1", "Article 2", "Article 3", "Article 4", ], attrgetter("headline") ) self.assertQuerysetEqual( Article.objects.order_by(F('headline').desc()), [ "Article 4", "Article 3", "Article 2", "Article 1", ], attrgetter("headline") ) def test_order_by_f_expression_duplicates(self): """ A column may only be included once (the first occurrence) so we check to ensure there are no duplicates by inspecting the SQL. """ qs = Article.objects.order_by(F('headline').asc(), F('headline').desc()) sql = str(qs.query).upper() fragment = sql[sql.find('ORDER BY'):] self.assertEqual(fragment.count('HEADLINE'), 1) self.assertQuerysetEqual( qs, [ "Article 1", "Article 2", "Article 3", "Article 4", ], attrgetter("headline") ) qs = Article.objects.order_by(F('headline').desc(), F('headline').asc()) sql = str(qs.query).upper() fragment = sql[sql.find('ORDER BY'):] self.assertEqual(fragment.count('HEADLINE'), 1) self.assertQuerysetEqual( qs, [ "Article 4", "Article 3", "Article 2", "Article 1", ], attrgetter("headline") ) def test_order_by_constant_value(self): # Order by annotated constant from selected columns. qs = Article.objects.annotate( constant=Value('1', output_field=CharField()), ).order_by('constant', '-headline') self.assertSequenceEqual(qs, [self.a4, self.a3, self.a2, self.a1]) # Order by annotated constant which is out of selected columns. self.assertSequenceEqual( qs.values_list('headline', flat=True), [ 'Article 4', 'Article 3', 'Article 2', 'Article 1', ], ) # Order by constant. qs = Article.objects.order_by(Value('1', output_field=CharField()), '-headline') self.assertSequenceEqual(qs, [self.a4, self.a3, self.a2, self.a1]) def test_order_by_constant_value_without_output_field(self): msg = 'Cannot resolve expression type, unknown output_field' qs = Article.objects.annotate(constant=Value('1')).order_by('constant') for ordered_qs in ( qs, qs.values('headline'), Article.objects.order_by(Value('1')), ): with self.subTest(ordered_qs=ordered_qs), self.assertRaisesMessage(FieldError, msg): ordered_qs.first() def test_related_ordering_duplicate_table_reference(self): """ An ordering referencing a model with an ordering referencing a model multiple time no circular reference should be detected (#24654). 
""" first_author = Author.objects.create() second_author = Author.objects.create() self.a1.author = first_author self.a1.second_author = second_author self.a1.save() self.a2.author = second_author self.a2.second_author = first_author self.a2.save() r1 = Reference.objects.create(article_id=self.a1.pk) r2 = Reference.objects.create(article_id=self.a2.pk) self.assertSequenceEqual(Reference.objects.all(), [r2, r1]) def test_default_ordering_by_f_expression(self): """F expressions can be used in Meta.ordering.""" articles = OrderedByFArticle.objects.all() articles.filter(headline='Article 2').update(author=self.author_2) articles.filter(headline='Article 3').update(author=self.author_1) self.assertQuerysetEqual( articles, ['Article 1', 'Article 4', 'Article 3', 'Article 2'], attrgetter('headline') ) def test_order_by_ptr_field_with_default_ordering_by_expression(self): ca1 = ChildArticle.objects.create( headline='h2', pub_date=datetime(2005, 7, 27), author=self.author_2, ) ca2 = ChildArticle.objects.create( headline='h2', pub_date=datetime(2005, 7, 27), author=self.author_1, ) ca3 = ChildArticle.objects.create( headline='h3', pub_date=datetime(2005, 7, 27), author=self.author_1, ) ca4 = ChildArticle.objects.create(headline='h1', pub_date=datetime(2005, 7, 28)) articles = ChildArticle.objects.order_by('article_ptr') self.assertSequenceEqual(articles, [ca4, ca2, ca1, ca3]) def test_deprecated_values_annotate(self): msg = ( "Article QuerySet won't use Meta.ordering in Django 3.1. Add " ".order_by('-pub_date', F(headline), OrderBy(F(author__name), " "descending=False), OrderBy(F(second_author__name), " "descending=False)) to retain the current query." ) with self.assertRaisesMessage(RemovedInDjango31Warning, msg): list(Article.objects.values('author').annotate(Count('headline')))
import unittest from django.conf import settings from django.core.checks import Error, Warning from django.core.checks.model_checks import _check_lazy_references from django.core.exceptions import ImproperlyConfigured from django.db import connection, connections, models from django.db.models.functions import Lower from django.db.models.signals import post_init from django.test import SimpleTestCase from django.test.utils import isolate_apps, override_settings, register_lookup def get_max_column_name_length(): allowed_len = None db_alias = None for db in settings.DATABASES: connection = connections[db] max_name_length = connection.ops.max_name_length() if max_name_length is not None and not connection.features.truncates_names: if allowed_len is None or max_name_length < allowed_len: allowed_len = max_name_length db_alias = db return (allowed_len, db_alias) @isolate_apps('invalid_models_tests') class IndexTogetherTests(SimpleTestCase): def test_non_iterable(self): class Model(models.Model): class Meta: index_together = 42 self.assertEqual(Model.check(), [ Error( "'index_together' must be a list or tuple.", obj=Model, id='models.E008', ), ]) def test_non_list(self): class Model(models.Model): class Meta: index_together = 'not-a-list' self.assertEqual(Model.check(), [ Error( "'index_together' must be a list or tuple.", obj=Model, id='models.E008', ), ]) def test_list_containing_non_iterable(self): class Model(models.Model): class Meta: index_together = [('a', 'b'), 42] self.assertEqual(Model.check(), [ Error( "All 'index_together' elements must be lists or tuples.", obj=Model, id='models.E009', ), ]) def test_pointing_to_missing_field(self): class Model(models.Model): class Meta: index_together = [['missing_field']] self.assertEqual(Model.check(), [ Error( "'index_together' refers to the nonexistent field 'missing_field'.", obj=Model, id='models.E012', ), ]) def test_pointing_to_non_local_field(self): class Foo(models.Model): field1 = models.IntegerField() class Bar(Foo): field2 = models.IntegerField() class Meta: index_together = [['field2', 'field1']] self.assertEqual(Bar.check(), [ Error( "'index_together' refers to field 'field1' which is not " "local to model 'Bar'.", hint='This issue may be caused by multi-table inheritance.', obj=Bar, id='models.E016', ), ]) def test_pointing_to_m2m_field(self): class Model(models.Model): m2m = models.ManyToManyField('self') class Meta: index_together = [['m2m']] self.assertEqual(Model.check(), [ Error( "'index_together' refers to a ManyToManyField 'm2m', but " "ManyToManyFields are not permitted in 'index_together'.", obj=Model, id='models.E013', ), ]) def test_pointing_to_fk(self): class Foo(models.Model): pass class Bar(models.Model): foo_1 = models.ForeignKey(Foo, on_delete=models.CASCADE, related_name='bar_1') foo_2 = models.ForeignKey(Foo, on_delete=models.CASCADE, related_name='bar_2') class Meta: index_together = [['foo_1_id', 'foo_2']] self.assertEqual(Bar.check(), []) # unique_together tests are very similar to index_together tests. 
@isolate_apps('invalid_models_tests') class UniqueTogetherTests(SimpleTestCase): def test_non_iterable(self): class Model(models.Model): class Meta: unique_together = 42 self.assertEqual(Model.check(), [ Error( "'unique_together' must be a list or tuple.", obj=Model, id='models.E010', ), ]) def test_list_containing_non_iterable(self): class Model(models.Model): one = models.IntegerField() two = models.IntegerField() class Meta: unique_together = [('a', 'b'), 42] self.assertEqual(Model.check(), [ Error( "All 'unique_together' elements must be lists or tuples.", obj=Model, id='models.E011', ), ]) def test_non_list(self): class Model(models.Model): class Meta: unique_together = 'not-a-list' self.assertEqual(Model.check(), [ Error( "'unique_together' must be a list or tuple.", obj=Model, id='models.E010', ), ]) def test_valid_model(self): class Model(models.Model): one = models.IntegerField() two = models.IntegerField() class Meta: # unique_together can be a simple tuple unique_together = ('one', 'two') self.assertEqual(Model.check(), []) def test_pointing_to_missing_field(self): class Model(models.Model): class Meta: unique_together = [['missing_field']] self.assertEqual(Model.check(), [ Error( "'unique_together' refers to the nonexistent field 'missing_field'.", obj=Model, id='models.E012', ), ]) def test_pointing_to_m2m(self): class Model(models.Model): m2m = models.ManyToManyField('self') class Meta: unique_together = [['m2m']] self.assertEqual(Model.check(), [ Error( "'unique_together' refers to a ManyToManyField 'm2m', but " "ManyToManyFields are not permitted in 'unique_together'.", obj=Model, id='models.E013', ), ]) def test_pointing_to_fk(self): class Foo(models.Model): pass class Bar(models.Model): foo_1 = models.ForeignKey(Foo, on_delete=models.CASCADE, related_name='bar_1') foo_2 = models.ForeignKey(Foo, on_delete=models.CASCADE, related_name='bar_2') class Meta: unique_together = [['foo_1_id', 'foo_2']] self.assertEqual(Bar.check(), []) @isolate_apps('invalid_models_tests') class IndexesTests(SimpleTestCase): def test_pointing_to_missing_field(self): class Model(models.Model): class Meta: indexes = [models.Index(fields=['missing_field'], name='name')] self.assertEqual(Model.check(), [ Error( "'indexes' refers to the nonexistent field 'missing_field'.", obj=Model, id='models.E012', ), ]) def test_pointing_to_m2m_field(self): class Model(models.Model): m2m = models.ManyToManyField('self') class Meta: indexes = [models.Index(fields=['m2m'], name='name')] self.assertEqual(Model.check(), [ Error( "'indexes' refers to a ManyToManyField 'm2m', but " "ManyToManyFields are not permitted in 'indexes'.", obj=Model, id='models.E013', ), ]) def test_pointing_to_non_local_field(self): class Foo(models.Model): field1 = models.IntegerField() class Bar(Foo): field2 = models.IntegerField() class Meta: indexes = [models.Index(fields=['field2', 'field1'], name='name')] self.assertEqual(Bar.check(), [ Error( "'indexes' refers to field 'field1' which is not local to " "model 'Bar'.", hint='This issue may be caused by multi-table inheritance.', obj=Bar, id='models.E016', ), ]) def test_pointing_to_fk(self): class Foo(models.Model): pass class Bar(models.Model): foo_1 = models.ForeignKey(Foo, on_delete=models.CASCADE, related_name='bar_1') foo_2 = models.ForeignKey(Foo, on_delete=models.CASCADE, related_name='bar_2') class Meta: indexes = [models.Index(fields=['foo_1_id', 'foo_2'], name='index_name')] self.assertEqual(Bar.check(), []) def test_name_constraints(self): class Model(models.Model): class 
Meta: indexes = [ models.Index(fields=['id'], name='_index_name'), models.Index(fields=['id'], name='5index_name'), ] self.assertEqual(Model.check(), [ Error( "The index name '%sindex_name' cannot start with an " "underscore or a number." % prefix, obj=Model, id='models.E033', ) for prefix in ('_', '5') ]) def test_max_name_length(self): index_name = 'x' * 31 class Model(models.Model): class Meta: indexes = [models.Index(fields=['id'], name=index_name)] self.assertEqual(Model.check(), [ Error( "The index name '%s' cannot be longer than 30 characters." % index_name, obj=Model, id='models.E034', ), ]) @isolate_apps('invalid_models_tests') class FieldNamesTests(SimpleTestCase): def test_ending_with_underscore(self): class Model(models.Model): field_ = models.CharField(max_length=10) m2m_ = models.ManyToManyField('self') self.assertEqual(Model.check(), [ Error( 'Field names must not end with an underscore.', obj=Model._meta.get_field('field_'), id='fields.E001', ), Error( 'Field names must not end with an underscore.', obj=Model._meta.get_field('m2m_'), id='fields.E001', ), ]) max_column_name_length, column_limit_db_alias = get_max_column_name_length() @unittest.skipIf(max_column_name_length is None, "The database doesn't have a column name length limit.") def test_M2M_long_column_name(self): """ #13711 -- Model check for long M2M column names when database has column name length limits. """ allowed_len, db_alias = get_max_column_name_length() # A model with very long name which will be used to set relations to. class VeryLongModelNamezzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz(models.Model): title = models.CharField(max_length=11) # Main model for which checks will be performed. class ModelWithLongField(models.Model): m2m_field = models.ManyToManyField( VeryLongModelNamezzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz, related_name='rn1', ) m2m_field2 = models.ManyToManyField( VeryLongModelNamezzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz, related_name='rn2', through='m2msimple', ) m2m_field3 = models.ManyToManyField( VeryLongModelNamezzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz, related_name='rn3', through='m2mcomplex', ) fk = models.ForeignKey( VeryLongModelNamezzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz, models.CASCADE, related_name='rn4', ) # Models used for setting `through` in M2M field. class m2msimple(models.Model): id2 = models.ForeignKey(ModelWithLongField, models.CASCADE) class m2mcomplex(models.Model): id2 = models.ForeignKey(ModelWithLongField, models.CASCADE) long_field_name = 'a' * (self.max_column_name_length + 1) models.ForeignKey( VeryLongModelNamezzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz, models.CASCADE, ).contribute_to_class(m2msimple, long_field_name) models.ForeignKey( VeryLongModelNamezzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz, models.CASCADE, db_column=long_field_name ).contribute_to_class(m2mcomplex, long_field_name) errors = ModelWithLongField.check() # First error because of M2M field set on the model with long name. m2m_long_name = "verylongmodelnamezzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz_id" if self.max_column_name_length > len(m2m_long_name): # Some databases support names longer than the test name. expected = [] else: expected = [ Error( 'Autogenerated column name too long for M2M field "%s". ' 'Maximum length is "%s" for database "%s".' 
                    % (m2m_long_name, self.max_column_name_length, self.column_limit_db_alias),
                    hint="Use 'through' to create a separate model for "
                         "M2M and then set column_name using 'db_column'.",
                    obj=ModelWithLongField,
                    id='models.E019',
                )
            ]

        # Second error because the FK specified in the `through` model
        # `m2msimple` has auto-generated name longer than allowed.
        # There will be no check errors in the other M2M because it
        # specifies db_column for the FK in `through` model even if the actual
        # name is longer than the limits of the database.
        expected.append(
            Error(
                'Autogenerated column name too long for M2M field "%s_id". '
                'Maximum length is "%s" for database "%s".'
                % (long_field_name, self.max_column_name_length, self.column_limit_db_alias),
                hint="Use 'through' to create a separate model for "
                     "M2M and then set column_name using 'db_column'.",
                obj=ModelWithLongField,
                id='models.E019',
            )
        )
        self.assertEqual(errors, expected)

    @unittest.skipIf(max_column_name_length is None, "The database doesn't have a column name length limit.")
    def test_local_field_long_column_name(self):
        """
        #13711 -- Model check for long column names when database does not
        support long names.
        """
        allowed_len, db_alias = get_max_column_name_length()

        class ModelWithLongField(models.Model):
            title = models.CharField(max_length=11)

        long_field_name = 'a' * (self.max_column_name_length + 1)
        long_field_name2 = 'b' * (self.max_column_name_length + 1)
        models.CharField(max_length=11).contribute_to_class(ModelWithLongField, long_field_name)
        models.CharField(max_length=11, db_column='vlmn').contribute_to_class(ModelWithLongField, long_field_name2)
        self.assertEqual(ModelWithLongField.check(), [
            Error(
                'Autogenerated column name too long for field "%s". '
                'Maximum length is "%s" for database "%s".'
                % (long_field_name, self.max_column_name_length, self.column_limit_db_alias),
                hint="Set the column name manually using 'db_column'.",
                obj=ModelWithLongField,
                id='models.E018',
            )
        ])

    def test_including_separator(self):
        class Model(models.Model):
            some__field = models.IntegerField()

        self.assertEqual(Model.check(), [
            Error(
                'Field names must not contain "__".',
                obj=Model._meta.get_field('some__field'),
                id='fields.E002',
            )
        ])

    def test_pk(self):
        class Model(models.Model):
            pk = models.IntegerField()

        self.assertEqual(Model.check(), [
            Error(
                "'pk' is a reserved word that cannot be used as a field name.",
                obj=Model._meta.get_field('pk'),
                id='fields.E003',
            )
        ])

    def test_db_column_clash(self):
        class Model(models.Model):
            foo = models.IntegerField()
            bar = models.IntegerField(db_column='foo')

        self.assertEqual(Model.check(), [
            Error(
                "Field 'bar' has column name 'foo' that is used by "
                "another field.",
                hint="Specify a 'db_column' for the field.",
                obj=Model,
                id='models.E007',
            )
        ])


@isolate_apps('invalid_models_tests')
class ShadowingFieldsTests(SimpleTestCase):

    def test_field_name_clash_with_child_accessor(self):
        class Parent(models.Model):
            pass

        class Child(Parent):
            child = models.CharField(max_length=100)

        self.assertEqual(Child.check(), [
            Error(
                "The field 'child' clashes with the field "
                "'child' from model 'invalid_models_tests.parent'.",
                obj=Child._meta.get_field('child'),
                id='models.E006',
            )
        ])

    def test_multiinheritance_clash(self):
        class Mother(models.Model):
            clash = models.IntegerField()

        class Father(models.Model):
            clash = models.IntegerField()

        class Child(Mother, Father):
            # Here we have two clashes: id (automatic field) and clash, because
            # both parents define these fields.
pass self.assertEqual(Child.check(), [ Error( "The field 'id' from parent model " "'invalid_models_tests.mother' clashes with the field 'id' " "from parent model 'invalid_models_tests.father'.", obj=Child, id='models.E005', ), Error( "The field 'clash' from parent model " "'invalid_models_tests.mother' clashes with the field 'clash' " "from parent model 'invalid_models_tests.father'.", obj=Child, id='models.E005', ) ]) def test_inheritance_clash(self): class Parent(models.Model): f_id = models.IntegerField() class Target(models.Model): # This field doesn't result in a clash. f_id = models.IntegerField() class Child(Parent): # This field clashes with parent "f_id" field. f = models.ForeignKey(Target, models.CASCADE) self.assertEqual(Child.check(), [ Error( "The field 'f' clashes with the field 'f_id' " "from model 'invalid_models_tests.parent'.", obj=Child._meta.get_field('f'), id='models.E006', ) ]) def test_multigeneration_inheritance(self): class GrandParent(models.Model): clash = models.IntegerField() class Parent(GrandParent): pass class Child(Parent): pass class GrandChild(Child): clash = models.IntegerField() self.assertEqual(GrandChild.check(), [ Error( "The field 'clash' clashes with the field 'clash' " "from model 'invalid_models_tests.grandparent'.", obj=GrandChild._meta.get_field('clash'), id='models.E006', ) ]) def test_id_clash(self): class Target(models.Model): pass class Model(models.Model): fk = models.ForeignKey(Target, models.CASCADE) fk_id = models.IntegerField() self.assertEqual(Model.check(), [ Error( "The field 'fk_id' clashes with the field 'fk' from model " "'invalid_models_tests.model'.", obj=Model._meta.get_field('fk_id'), id='models.E006', ) ]) @isolate_apps('invalid_models_tests') class OtherModelTests(SimpleTestCase): def test_unique_primary_key(self): invalid_id = models.IntegerField(primary_key=False) class Model(models.Model): id = invalid_id self.assertEqual(Model.check(), [ Error( "'id' can only be used as a field name if the field also sets " "'primary_key=True'.", obj=Model, id='models.E004', ), ]) def test_ordering_non_iterable(self): class Model(models.Model): class Meta: ordering = 'missing_field' self.assertEqual(Model.check(), [ Error( "'ordering' must be a tuple or list " "(even if you want to order by only one field).", obj=Model, id='models.E014', ), ]) def test_just_ordering_no_errors(self): class Model(models.Model): order = models.PositiveIntegerField() class Meta: ordering = ['order'] self.assertEqual(Model.check(), []) def test_just_order_with_respect_to_no_errors(self): class Question(models.Model): pass class Answer(models.Model): question = models.ForeignKey(Question, models.CASCADE) class Meta: order_with_respect_to = 'question' self.assertEqual(Answer.check(), []) def test_ordering_with_order_with_respect_to(self): class Question(models.Model): pass class Answer(models.Model): question = models.ForeignKey(Question, models.CASCADE) order = models.IntegerField() class Meta: order_with_respect_to = 'question' ordering = ['order'] self.assertEqual(Answer.check(), [ Error( "'ordering' and 'order_with_respect_to' cannot be used together.", obj=Answer, id='models.E021', ), ]) def test_non_valid(self): class RelationModel(models.Model): pass class Model(models.Model): relation = models.ManyToManyField(RelationModel) class Meta: ordering = ['relation'] self.assertEqual(Model.check(), [ Error( "'ordering' refers to the nonexistent field, related field, " "or lookup 'relation'.", obj=Model, id='models.E015', ), ]) def 
test_ordering_pointing_to_missing_field(self): class Model(models.Model): class Meta: ordering = ('missing_field',) self.assertEqual(Model.check(), [ Error( "'ordering' refers to the nonexistent field, related field, " "or lookup 'missing_field'.", obj=Model, id='models.E015', ) ]) def test_ordering_pointing_to_missing_foreignkey_field(self): class Model(models.Model): missing_fk_field = models.IntegerField() class Meta: ordering = ('missing_fk_field_id',) self.assertEqual(Model.check(), [ Error( "'ordering' refers to the nonexistent field, related field, " "or lookup 'missing_fk_field_id'.", obj=Model, id='models.E015', ) ]) def test_ordering_pointing_to_missing_related_field(self): class Model(models.Model): test = models.IntegerField() class Meta: ordering = ('missing_related__id',) self.assertEqual(Model.check(), [ Error( "'ordering' refers to the nonexistent field, related field, " "or lookup 'missing_related__id'.", obj=Model, id='models.E015', ) ]) def test_ordering_pointing_to_missing_related_model_field(self): class Parent(models.Model): pass class Child(models.Model): parent = models.ForeignKey(Parent, models.CASCADE) class Meta: ordering = ('parent__missing_field',) self.assertEqual(Child.check(), [ Error( "'ordering' refers to the nonexistent field, related field, " "or lookup 'parent__missing_field'.", obj=Child, id='models.E015', ) ]) def test_ordering_pointing_to_non_related_field(self): class Child(models.Model): parent = models.IntegerField() class Meta: ordering = ('parent__missing_field',) self.assertEqual(Child.check(), [ Error( "'ordering' refers to the nonexistent field, related field, " "or lookup 'parent__missing_field'.", obj=Child, id='models.E015', ) ]) def test_ordering_pointing_to_two_related_model_field(self): class Parent2(models.Model): pass class Parent1(models.Model): parent2 = models.ForeignKey(Parent2, models.CASCADE) class Child(models.Model): parent1 = models.ForeignKey(Parent1, models.CASCADE) class Meta: ordering = ('parent1__parent2__missing_field',) self.assertEqual(Child.check(), [ Error( "'ordering' refers to the nonexistent field, related field, " "or lookup 'parent1__parent2__missing_field'.", obj=Child, id='models.E015', ) ]) def test_ordering_allows_registered_lookups(self): class Model(models.Model): test = models.CharField(max_length=100) class Meta: ordering = ('test__lower',) with register_lookup(models.CharField, Lower): self.assertEqual(Model.check(), []) def test_ordering_pointing_to_foreignkey_field(self): class Parent(models.Model): pass class Child(models.Model): parent = models.ForeignKey(Parent, models.CASCADE) class Meta: ordering = ('parent_id',) self.assertFalse(Child.check()) def test_name_beginning_with_underscore(self): class _Model(models.Model): pass self.assertEqual(_Model.check(), [ Error( "The model name '_Model' cannot start or end with an underscore " "as it collides with the query lookup syntax.", obj=_Model, id='models.E023', ) ]) def test_name_ending_with_underscore(self): class Model_(models.Model): pass self.assertEqual(Model_.check(), [ Error( "The model name 'Model_' cannot start or end with an underscore " "as it collides with the query lookup syntax.", obj=Model_, id='models.E023', ) ]) def test_name_contains_double_underscores(self): class Test__Model(models.Model): pass self.assertEqual(Test__Model.check(), [ Error( "The model name 'Test__Model' cannot contain double underscores " "as it collides with the query lookup syntax.", obj=Test__Model, id='models.E024', ) ]) def 
test_property_and_related_field_accessor_clash(self): class Model(models.Model): fk = models.ForeignKey('self', models.CASCADE) @property def fk_id(self): pass self.assertEqual(Model.check(), [ Error( "The property 'fk_id' clashes with a related field accessor.", obj=Model, id='models.E025', ) ]) def test_single_primary_key(self): class Model(models.Model): foo = models.IntegerField(primary_key=True) bar = models.IntegerField(primary_key=True) self.assertEqual(Model.check(), [ Error( "The model cannot have more than one field with 'primary_key=True'.", obj=Model, id='models.E026', ) ]) @override_settings(TEST_SWAPPED_MODEL_BAD_VALUE='not-a-model') def test_swappable_missing_app_name(self): class Model(models.Model): class Meta: swappable = 'TEST_SWAPPED_MODEL_BAD_VALUE' self.assertEqual(Model.check(), [ Error( "'TEST_SWAPPED_MODEL_BAD_VALUE' is not of the form 'app_label.app_name'.", id='models.E001', ), ]) @override_settings(TEST_SWAPPED_MODEL_BAD_MODEL='not_an_app.Target') def test_swappable_missing_app(self): class Model(models.Model): class Meta: swappable = 'TEST_SWAPPED_MODEL_BAD_MODEL' self.assertEqual(Model.check(), [ Error( "'TEST_SWAPPED_MODEL_BAD_MODEL' references 'not_an_app.Target', " 'which has not been installed, or is abstract.', id='models.E002', ), ]) def test_two_m2m_through_same_relationship(self): class Person(models.Model): pass class Group(models.Model): primary = models.ManyToManyField(Person, through='Membership', related_name='primary') secondary = models.ManyToManyField(Person, through='Membership', related_name='secondary') class Membership(models.Model): person = models.ForeignKey(Person, models.CASCADE) group = models.ForeignKey(Group, models.CASCADE) self.assertEqual(Group.check(), [ Error( "The model has two identical many-to-many relations through " "the intermediate model 'invalid_models_tests.Membership'.", obj=Group, id='models.E003', ) ]) def test_two_m2m_through_same_model_with_different_through_fields(self): class Country(models.Model): pass class ShippingMethod(models.Model): to_countries = models.ManyToManyField( Country, through='ShippingMethodPrice', through_fields=('method', 'to_country'), ) from_countries = models.ManyToManyField( Country, through='ShippingMethodPrice', through_fields=('method', 'from_country'), related_name='+', ) class ShippingMethodPrice(models.Model): method = models.ForeignKey(ShippingMethod, models.CASCADE) to_country = models.ForeignKey(Country, models.CASCADE) from_country = models.ForeignKey(Country, models.CASCADE) self.assertEqual(ShippingMethod.check(), []) def test_missing_parent_link(self): msg = 'Add parent_link=True to invalid_models_tests.ParkingLot.parent.' 
with self.assertRaisesMessage(ImproperlyConfigured, msg): class Place(models.Model): pass class ParkingLot(Place): parent = models.OneToOneField(Place, models.CASCADE) def test_m2m_table_name_clash(self): class Foo(models.Model): bar = models.ManyToManyField('Bar', db_table='myapp_bar') class Meta: db_table = 'myapp_foo' class Bar(models.Model): class Meta: db_table = 'myapp_bar' self.assertEqual(Foo.check(), [ Error( "The field's intermediary table 'myapp_bar' clashes with the " "table name of 'invalid_models_tests.Bar'.", obj=Foo._meta.get_field('bar'), id='fields.E340', ) ]) def test_m2m_field_table_name_clash(self): class Foo(models.Model): pass class Bar(models.Model): foos = models.ManyToManyField(Foo, db_table='clash') class Baz(models.Model): foos = models.ManyToManyField(Foo, db_table='clash') self.assertEqual(Bar.check() + Baz.check(), [ Error( "The field's intermediary table 'clash' clashes with the " "table name of 'invalid_models_tests.Baz.foos'.", obj=Bar._meta.get_field('foos'), id='fields.E340', ), Error( "The field's intermediary table 'clash' clashes with the " "table name of 'invalid_models_tests.Bar.foos'.", obj=Baz._meta.get_field('foos'), id='fields.E340', ) ]) def test_m2m_autogenerated_table_name_clash(self): class Foo(models.Model): class Meta: db_table = 'bar_foos' class Bar(models.Model): # The autogenerated `db_table` will be bar_foos. foos = models.ManyToManyField(Foo) class Meta: db_table = 'bar' self.assertEqual(Bar.check(), [ Error( "The field's intermediary table 'bar_foos' clashes with the " "table name of 'invalid_models_tests.Foo'.", obj=Bar._meta.get_field('foos'), id='fields.E340', ) ]) def test_m2m_unmanaged_shadow_models_not_checked(self): class A1(models.Model): pass class C1(models.Model): mm_a = models.ManyToManyField(A1, db_table='d1') # Unmanaged models that shadow the above models. Reused table names # shouldn't be flagged by any checks. 
class A2(models.Model): class Meta: managed = False class C2(models.Model): mm_a = models.ManyToManyField(A2, through='Intermediate') class Meta: managed = False class Intermediate(models.Model): a2 = models.ForeignKey(A2, models.CASCADE, db_column='a1_id') c2 = models.ForeignKey(C2, models.CASCADE, db_column='c1_id') class Meta: db_table = 'd1' managed = False self.assertEqual(C1.check(), []) self.assertEqual(C2.check(), []) def test_m2m_to_concrete_and_proxy_allowed(self): class A(models.Model): pass class Through(models.Model): a = models.ForeignKey('A', models.CASCADE) c = models.ForeignKey('C', models.CASCADE) class ThroughProxy(Through): class Meta: proxy = True class C(models.Model): mm_a = models.ManyToManyField(A, through=Through) mm_aproxy = models.ManyToManyField(A, through=ThroughProxy, related_name='proxied_m2m') self.assertEqual(C.check(), []) @isolate_apps('django.contrib.auth', kwarg_name='apps') def test_lazy_reference_checks(self, apps): class DummyModel(models.Model): author = models.ForeignKey('Author', models.CASCADE) class Meta: app_label = 'invalid_models_tests' class DummyClass: def __call__(self, **kwargs): pass def dummy_method(self): pass def dummy_function(*args, **kwargs): pass apps.lazy_model_operation(dummy_function, ('auth', 'imaginarymodel')) apps.lazy_model_operation(dummy_function, ('fanciful_app', 'imaginarymodel')) post_init.connect(dummy_function, sender='missing-app.Model', apps=apps) post_init.connect(DummyClass(), sender='missing-app.Model', apps=apps) post_init.connect(DummyClass().dummy_method, sender='missing-app.Model', apps=apps) self.assertEqual(_check_lazy_references(apps), [ Error( "%r contains a lazy reference to auth.imaginarymodel, " "but app 'auth' doesn't provide model 'imaginarymodel'." % dummy_function, obj=dummy_function, id='models.E022', ), Error( "%r contains a lazy reference to fanciful_app.imaginarymodel, " "but app 'fanciful_app' isn't installed." % dummy_function, obj=dummy_function, id='models.E022', ), Error( "An instance of class 'DummyClass' was connected to " "the 'post_init' signal with a lazy reference to the sender " "'missing-app.model', but app 'missing-app' isn't installed.", hint=None, obj='invalid_models_tests.test_models', id='signals.E001', ), Error( "Bound method 'DummyClass.dummy_method' was connected to the " "'post_init' signal with a lazy reference to the sender " "'missing-app.model', but app 'missing-app' isn't installed.", hint=None, obj='invalid_models_tests.test_models', id='signals.E001', ), Error( "The field invalid_models_tests.DummyModel.author was declared " "with a lazy reference to 'invalid_models_tests.author', but app " "'invalid_models_tests' isn't installed.", hint=None, obj=DummyModel.author.field, id='fields.E307', ), Error( "The function 'dummy_function' was connected to the 'post_init' " "signal with a lazy reference to the sender " "'missing-app.model', but app 'missing-app' isn't installed.", hint=None, obj='invalid_models_tests.test_models', id='signals.E001', ), ]) @isolate_apps('invalid_models_tests') class ConstraintsTests(SimpleTestCase): def test_check_constraints(self): class Model(models.Model): age = models.IntegerField() class Meta: constraints = [models.CheckConstraint(check=models.Q(age__gte=18), name='is_adult')] errors = Model.check() warn = Warning( '%s does not support check constraints.' % connection.display_name, hint=( "A constraint won't be created. Silence this warning if you " "don't care about it." 
), obj=Model, id='models.W027', ) expected = [] if connection.features.supports_table_check_constraints else [warn, warn] self.assertCountEqual(errors, expected) def test_check_constraints_required_db_features(self): class Model(models.Model): age = models.IntegerField() class Meta: required_db_features = {'supports_table_check_constraints'} constraints = [models.CheckConstraint(check=models.Q(age__gte=18), name='is_adult')] self.assertEqual(Model.check(), [])
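
# A minimal declaration sketch (not part of the original suite; the model and
# constraint names are illustrative) of the pattern the two check-constraint
# tests above exercise: pairing a CheckConstraint with required_db_features
# silences models.W027, because the model is only considered for migration on
# backends that report supports_table_check_constraints.
from django.db import models


class AdultSketch(models.Model):
    age = models.IntegerField()

    class Meta:
        app_label = 'invalid_models_tests'
        # The model (and its constraint) is skipped on backends that lack
        # table check constraint support, so no warning is raised.
        required_db_features = {'supports_table_check_constraints'}
        constraints = [
            models.CheckConstraint(check=models.Q(age__gte=18), name='age_gte_18'),
        ]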
import unittest from django.core import validators from django.core.exceptions import ValidationError from django.db import IntegrityError, connection, models from django.test import SimpleTestCase, TestCase from .models import ( BigIntegerModel, IntegerModel, PositiveIntegerModel, PositiveSmallIntegerModel, SmallIntegerModel, ) class IntegerFieldTests(TestCase): model = IntegerModel documented_range = (-2147483648, 2147483647) @property def backend_range(self): field = self.model._meta.get_field('value') internal_type = field.get_internal_type() return connection.ops.integer_field_range(internal_type) def test_documented_range(self): """ Values within the documented safe range pass validation, and can be saved and retrieved without corruption. """ min_value, max_value = self.documented_range instance = self.model(value=min_value) instance.full_clean() instance.save() qs = self.model.objects.filter(value__lte=min_value) self.assertEqual(qs.count(), 1) self.assertEqual(qs[0].value, min_value) instance = self.model(value=max_value) instance.full_clean() instance.save() qs = self.model.objects.filter(value__gte=max_value) self.assertEqual(qs.count(), 1) self.assertEqual(qs[0].value, max_value) def test_backend_range_save(self): """ Backend specific ranges can be saved without corruption. """ min_value, max_value = self.backend_range if min_value is not None: instance = self.model(value=min_value) instance.full_clean() instance.save() qs = self.model.objects.filter(value__lte=min_value) self.assertEqual(qs.count(), 1) self.assertEqual(qs[0].value, min_value) if max_value is not None: instance = self.model(value=max_value) instance.full_clean() instance.save() qs = self.model.objects.filter(value__gte=max_value) self.assertEqual(qs.count(), 1) self.assertEqual(qs[0].value, max_value) def test_backend_range_validation(self): """ Backend specific ranges are enforced at the model validation level (#12030). """ min_value, max_value = self.backend_range if min_value is not None: instance = self.model(value=min_value - 1) expected_message = validators.MinValueValidator.message % { 'limit_value': min_value, } with self.assertRaisesMessage(ValidationError, expected_message): instance.full_clean() instance.value = min_value instance.full_clean() if max_value is not None: instance = self.model(value=max_value + 1) expected_message = validators.MaxValueValidator.message % { 'limit_value': max_value, } with self.assertRaisesMessage(ValidationError, expected_message): instance.full_clean() instance.value = max_value instance.full_clean() def test_redundant_backend_range_validators(self): """ If there are stricter validators than the ones from the database backend then the backend validators aren't added. 
""" min_backend_value, max_backend_value = self.backend_range for callable_limit in (True, False): with self.subTest(callable_limit=callable_limit): if min_backend_value is not None: min_custom_value = min_backend_value + 1 limit_value = (lambda: min_custom_value) if callable_limit else min_custom_value ranged_value_field = self.model._meta.get_field('value').__class__( validators=[validators.MinValueValidator(limit_value)] ) field_range_message = validators.MinValueValidator.message % { 'limit_value': min_custom_value, } with self.assertRaisesMessage(ValidationError, '[%r]' % field_range_message): ranged_value_field.run_validators(min_backend_value - 1) if max_backend_value is not None: max_custom_value = max_backend_value - 1 limit_value = (lambda: max_custom_value) if callable_limit else max_custom_value ranged_value_field = self.model._meta.get_field('value').__class__( validators=[validators.MaxValueValidator(limit_value)] ) field_range_message = validators.MaxValueValidator.message % { 'limit_value': max_custom_value, } with self.assertRaisesMessage(ValidationError, '[%r]' % field_range_message): ranged_value_field.run_validators(max_backend_value + 1) def test_types(self): instance = self.model(value=0) self.assertIsInstance(instance.value, int) instance.save() self.assertIsInstance(instance.value, int) instance = self.model.objects.get() self.assertIsInstance(instance.value, int) def test_coercing(self): self.model.objects.create(value='10') instance = self.model.objects.get(value='10') self.assertEqual(instance.value, 10) def test_invalid_value(self): tests = [ (TypeError, ()), (TypeError, []), (TypeError, {}), (TypeError, set()), (TypeError, object()), (TypeError, complex()), (ValueError, 'non-numeric string'), (ValueError, b'non-numeric byte-string'), ] for exception, value in tests: with self.subTest(value): msg = "Field 'value' expected a number but got %r." 
% (value,)
                with self.assertRaisesMessage(exception, msg):
                    self.model.objects.create(value=value)


class SmallIntegerFieldTests(IntegerFieldTests):
    model = SmallIntegerModel
    documented_range = (-32768, 32767)


class BigIntegerFieldTests(IntegerFieldTests):
    model = BigIntegerModel
    documented_range = (-9223372036854775808, 9223372036854775807)


class PositiveSmallIntegerFieldTests(IntegerFieldTests):
    model = PositiveSmallIntegerModel
    documented_range = (0, 32767)


class PositiveIntegerFieldTests(IntegerFieldTests):
    model = PositiveIntegerModel
    documented_range = (0, 2147483647)

    @unittest.skipIf(connection.vendor == 'sqlite', "SQLite doesn't have a constraint.")
    def test_negative_values(self):
        p = PositiveIntegerModel.objects.create(value=0)
        p.value = models.F('value') - 1
        with self.assertRaises(IntegrityError):
            p.save()


class ValidationTests(SimpleTestCase):

    def test_integerfield_cleans_valid_string(self):
        f = models.IntegerField()
        self.assertEqual(f.clean('2', None), 2)

    def test_integerfield_raises_error_on_invalid_input(self):
        f = models.IntegerField()
        with self.assertRaises(ValidationError):
            f.clean('a', None)

    def test_choices_validation_supports_named_groups(self):
        f = models.IntegerField(choices=(('group', ((10, 'A'), (20, 'B'))), (30, 'C')))
        self.assertEqual(10, f.clean(10, None))

    def test_nullable_integerfield_raises_error_with_blank_false(self):
        f = models.IntegerField(null=True, blank=False)
        with self.assertRaises(ValidationError):
            f.clean(None, None)

    def test_nullable_integerfield_cleans_none_on_null_and_blank_true(self):
        f = models.IntegerField(null=True, blank=True)
        self.assertIsNone(f.clean(None, None))

    def test_integerfield_raises_error_on_empty_input(self):
        f = models.IntegerField(null=False)
        with self.assertRaises(ValidationError):
            f.clean(None, None)
        with self.assertRaises(ValidationError):
            f.clean('', None)

    def test_integerfield_validates_zero_against_choices(self):
        f = models.IntegerField(choices=((1, 1),))
        with self.assertRaises(ValidationError):
            f.clean('0', None)
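
# A minimal sketch (standalone illustration, not part of the suite) of the
# range lookup the tests above rely on: the backend-enforced bounds that
# full_clean() validates against come from connection.ops.integer_field_range(),
# keyed on the field's internal type. The exact bounds vary by backend.
from django.db import connection, models

field = models.SmallIntegerField()
backend_min, backend_max = connection.ops.integer_field_range(field.get_internal_type())
# Values outside (backend_min, backend_max) fail model validation with the
# MinValueValidator/MaxValueValidator messages, unless stricter custom
# validators already cover the range.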
from django.test import TestCase from .models import AutoModel, BigAutoModel, SmallAutoModel class AutoFieldTests(TestCase): model = AutoModel def test_invalid_value(self): tests = [ (TypeError, ()), (TypeError, []), (TypeError, {}), (TypeError, set()), (TypeError, object()), (TypeError, complex()), (ValueError, 'non-numeric string'), (ValueError, b'non-numeric byte-string'), ] for exception, value in tests: with self.subTest(value=value): msg = "Field 'value' expected a number but got %r." % (value,) with self.assertRaisesMessage(exception, msg): self.model.objects.create(value=value) class BigAutoFieldTests(AutoFieldTests): model = BigAutoModel class SmallAutoFieldTests(AutoFieldTests): model = SmallAutoModel
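
# A small sketch of the assumed coercion behaviour the shared test matrix
# above exercises: all three AutoField variants coerce assigned values
# through int(), so non-numeric input fails with the same "expected a
# number" message regardless of the column width.
from django.db import models

f = models.AutoField(primary_key=True)
assert f.to_python('10') == 10  # numeric strings are coerced to int
# f.to_python('non-numeric string') would raise ValidationError instead.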
from django.core.exceptions import ValidationError from django.db import IntegrityError, connection, models from django.db.models.constraints import BaseConstraint from django.test import SimpleTestCase, TestCase, skipUnlessDBFeature from .models import ChildModel, Product, UniqueConstraintProduct def get_constraints(table): with connection.cursor() as cursor: return connection.introspection.get_constraints(cursor, table) class BaseConstraintTests(SimpleTestCase): def test_constraint_sql(self): c = BaseConstraint('name') msg = 'This method must be implemented by a subclass.' with self.assertRaisesMessage(NotImplementedError, msg): c.constraint_sql(None, None) def test_create_sql(self): c = BaseConstraint('name') msg = 'This method must be implemented by a subclass.' with self.assertRaisesMessage(NotImplementedError, msg): c.create_sql(None, None) def test_remove_sql(self): c = BaseConstraint('name') msg = 'This method must be implemented by a subclass.' with self.assertRaisesMessage(NotImplementedError, msg): c.remove_sql(None, None) class CheckConstraintTests(TestCase): def test_eq(self): check1 = models.Q(price__gt=models.F('discounted_price')) check2 = models.Q(price__lt=models.F('discounted_price')) self.assertEqual( models.CheckConstraint(check=check1, name='price'), models.CheckConstraint(check=check1, name='price'), ) self.assertNotEqual( models.CheckConstraint(check=check1, name='price'), models.CheckConstraint(check=check1, name='price2'), ) self.assertNotEqual( models.CheckConstraint(check=check1, name='price'), models.CheckConstraint(check=check2, name='price'), ) self.assertNotEqual(models.CheckConstraint(check=check1, name='price'), 1) def test_repr(self): check = models.Q(price__gt=models.F('discounted_price')) name = 'price_gt_discounted_price' constraint = models.CheckConstraint(check=check, name=name) self.assertEqual( repr(constraint), "<CheckConstraint: check='{}' name='{}'>".format(check, name), ) def test_deconstruction(self): check = models.Q(price__gt=models.F('discounted_price')) name = 'price_gt_discounted_price' constraint = models.CheckConstraint(check=check, name=name) path, args, kwargs = constraint.deconstruct() self.assertEqual(path, 'django.db.models.CheckConstraint') self.assertEqual(args, ()) self.assertEqual(kwargs, {'check': check, 'name': name}) @skipUnlessDBFeature('supports_table_check_constraints') def test_database_constraint(self): Product.objects.create(price=10, discounted_price=5) with self.assertRaises(IntegrityError): Product.objects.create(price=10, discounted_price=20) @skipUnlessDBFeature('supports_table_check_constraints', 'can_introspect_check_constraints') def test_name(self): constraints = get_constraints(Product._meta.db_table) for expected_name in ( 'price_gt_discounted_price', 'constraints_product_price_gt_0', ): with self.subTest(expected_name): self.assertIn(expected_name, constraints) @skipUnlessDBFeature('supports_table_check_constraints', 'can_introspect_check_constraints') def test_abstract_name(self): constraints = get_constraints(ChildModel._meta.db_table) self.assertIn('constraints_childmodel_adult', constraints) class UniqueConstraintTests(TestCase): @classmethod def setUpTestData(cls): cls.p1, cls.p2 = UniqueConstraintProduct.objects.bulk_create([ UniqueConstraintProduct(name='p1', color='red'), UniqueConstraintProduct(name='p2'), ]) def test_eq(self): self.assertEqual( models.UniqueConstraint(fields=['foo', 'bar'], name='unique'), models.UniqueConstraint(fields=['foo', 'bar'], name='unique'), ) self.assertNotEqual( 
models.UniqueConstraint(fields=['foo', 'bar'], name='unique'), models.UniqueConstraint(fields=['foo', 'bar'], name='unique2'), ) self.assertNotEqual( models.UniqueConstraint(fields=['foo', 'bar'], name='unique'), models.UniqueConstraint(fields=['foo', 'baz'], name='unique'), ) self.assertNotEqual(models.UniqueConstraint(fields=['foo', 'bar'], name='unique'), 1) def test_eq_with_condition(self): self.assertEqual( models.UniqueConstraint( fields=['foo', 'bar'], name='unique', condition=models.Q(foo=models.F('bar')) ), models.UniqueConstraint( fields=['foo', 'bar'], name='unique', condition=models.Q(foo=models.F('bar'))), ) self.assertNotEqual( models.UniqueConstraint( fields=['foo', 'bar'], name='unique', condition=models.Q(foo=models.F('bar')) ), models.UniqueConstraint( fields=['foo', 'bar'], name='unique', condition=models.Q(foo=models.F('baz')) ), ) def test_repr(self): fields = ['foo', 'bar'] name = 'unique_fields' constraint = models.UniqueConstraint(fields=fields, name=name) self.assertEqual( repr(constraint), "<UniqueConstraint: fields=('foo', 'bar') name='unique_fields'>", ) def test_repr_with_condition(self): constraint = models.UniqueConstraint( fields=['foo', 'bar'], name='unique_fields', condition=models.Q(foo=models.F('bar')), ) self.assertEqual( repr(constraint), "<UniqueConstraint: fields=('foo', 'bar') name='unique_fields' " "condition=(AND: ('foo', F(bar)))>", ) def test_deconstruction(self): fields = ['foo', 'bar'] name = 'unique_fields' constraint = models.UniqueConstraint(fields=fields, name=name) path, args, kwargs = constraint.deconstruct() self.assertEqual(path, 'django.db.models.UniqueConstraint') self.assertEqual(args, ()) self.assertEqual(kwargs, {'fields': tuple(fields), 'name': name}) def test_deconstruction_with_condition(self): fields = ['foo', 'bar'] name = 'unique_fields' condition = models.Q(foo=models.F('bar')) constraint = models.UniqueConstraint(fields=fields, name=name, condition=condition) path, args, kwargs = constraint.deconstruct() self.assertEqual(path, 'django.db.models.UniqueConstraint') self.assertEqual(args, ()) self.assertEqual(kwargs, {'fields': tuple(fields), 'name': name, 'condition': condition}) def test_database_constraint(self): with self.assertRaises(IntegrityError): UniqueConstraintProduct.objects.create(name=self.p1.name, color=self.p1.color) def test_model_validation(self): msg = 'Unique constraint product with this Name and Color already exists.' with self.assertRaisesMessage(ValidationError, msg): UniqueConstraintProduct(name=self.p1.name, color=self.p1.color).validate_unique() def test_model_validation_with_condition(self): """Partial unique constraints are ignored by Model.validate_unique().""" UniqueConstraintProduct(name=self.p1.name, color='blue').validate_unique() UniqueConstraintProduct(name=self.p2.name).validate_unique() def test_name(self): constraints = get_constraints(UniqueConstraintProduct._meta.db_table) expected_name = 'name_color_uniq' self.assertIn(expected_name, constraints) def test_condition_must_be_q(self): with self.assertRaisesMessage(ValueError, 'UniqueConstraint.condition must be a Q instance.'): models.UniqueConstraint(name='uniq', fields=['name'], condition='invalid')
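
# A minimal declaration sketch (field and constraint names assumed) of the
# partial UniqueConstraint behaviour tested above: the condition narrows the
# constraint at the database level, and Model.validate_unique() skips such
# partial constraints entirely.
from django.db import models


class ProductSketch(models.Model):
    name = models.CharField(max_length=50)
    archived = models.BooleanField(default=False)

    class Meta:
        app_label = 'constraints'
        constraints = [
            models.UniqueConstraint(
                fields=['name'],
                name='unique_active_name',
                # Only rows with archived=False participate in the
                # uniqueness check.
                condition=models.Q(archived=False),
            ),
        ]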
import warnings
from datetime import datetime

from django.core.paginator import (
    EmptyPage, InvalidPage, PageNotAnInteger, Paginator, QuerySetPaginator,
    UnorderedObjectListWarning,
)
from django.test import SimpleTestCase, TestCase
from django.utils.deprecation import RemovedInDjango31Warning

from .custom import ValidAdjacentNumsPaginator
from .models import Article


class PaginationTests(SimpleTestCase):
    """
    Tests for the Paginator and Page classes.
    """

    def check_paginator(self, params, output):
        """
        Helper method that instantiates a Paginator object from the passed
        params and then checks that its attributes match the passed output.
        """
        count, num_pages, page_range = output
        paginator = Paginator(*params)
        self.check_attribute('count', paginator, count, params)
        self.check_attribute('num_pages', paginator, num_pages, params)
        self.check_attribute('page_range', paginator, page_range, params, coerce=list)

    def check_attribute(self, name, paginator, expected, params, coerce=None):
        """
        Helper method that checks a single attribute and gives a nice error
        message upon test failure.
        """
        got = getattr(paginator, name)
        if coerce is not None:
            got = coerce(got)
        self.assertEqual(
            expected, got,
            "For '%s', expected %s but got %s. Paginator parameters were: %s"
            % (name, expected, got, params)
        )

    def test_paginator(self):
        """
        Tests the paginator attributes using varying inputs.
        """
        nine = [1, 2, 3, 4, 5, 6, 7, 8, 9]
        ten = nine + [10]
        eleven = ten + [11]
        tests = (
            # Each item is two tuples:
            #     First tuple is Paginator parameters - object_list, per_page,
            #         orphans, and allow_empty_first_page.
            #     Second tuple is resulting Paginator attributes - count,
            #         num_pages, and page_range.
            # Ten items, varying orphans, no empty first page.
            ((ten, 4, 0, False), (10, 3, [1, 2, 3])),
            ((ten, 4, 1, False), (10, 3, [1, 2, 3])),
            ((ten, 4, 2, False), (10, 2, [1, 2])),
            ((ten, 4, 5, False), (10, 2, [1, 2])),
            ((ten, 4, 6, False), (10, 1, [1])),
            # Ten items, varying orphans, allow empty first page.
            ((ten, 4, 0, True), (10, 3, [1, 2, 3])),
            ((ten, 4, 1, True), (10, 3, [1, 2, 3])),
            ((ten, 4, 2, True), (10, 2, [1, 2])),
            ((ten, 4, 5, True), (10, 2, [1, 2])),
            ((ten, 4, 6, True), (10, 1, [1])),
            # One item, varying orphans, no empty first page.
            (([1], 4, 0, False), (1, 1, [1])),
            (([1], 4, 1, False), (1, 1, [1])),
            (([1], 4, 2, False), (1, 1, [1])),
            # One item, varying orphans, allow empty first page.
            (([1], 4, 0, True), (1, 1, [1])),
            (([1], 4, 1, True), (1, 1, [1])),
            (([1], 4, 2, True), (1, 1, [1])),
            # Zero items, varying orphans, no empty first page.
            (([], 4, 0, False), (0, 0, [])),
            (([], 4, 1, False), (0, 0, [])),
            (([], 4, 2, False), (0, 0, [])),
            # Zero items, varying orphans, allow empty first page.
            (([], 4, 0, True), (0, 1, [1])),
            (([], 4, 1, True), (0, 1, [1])),
            (([], 4, 2, True), (0, 1, [1])),
            # Number of items one less than per_page.
            (([], 1, 0, True), (0, 1, [1])),
            (([], 1, 0, False), (0, 0, [])),
            (([1], 2, 0, True), (1, 1, [1])),
            ((nine, 10, 0, True), (9, 1, [1])),
            # Number of items equal to per_page.
            (([1], 1, 0, True), (1, 1, [1])),
            (([1, 2], 2, 0, True), (2, 1, [1])),
            ((ten, 10, 0, True), (10, 1, [1])),
            # Number of items one more than per_page.
            (([1, 2], 1, 0, True), (2, 2, [1, 2])),
            (([1, 2, 3], 2, 0, True), (3, 2, [1, 2])),
            ((eleven, 10, 0, True), (11, 2, [1, 2])),
            # Number of items one more than per_page with one orphan.
(([1, 2], 1, 1, True), (2, 1, [1])), (([1, 2, 3], 2, 1, True), (3, 1, [1])), ((eleven, 10, 1, True), (11, 1, [1])), # Non-integer inputs ((ten, '4', 1, False), (10, 3, [1, 2, 3])), ((ten, '4', 1, False), (10, 3, [1, 2, 3])), ((ten, 4, '1', False), (10, 3, [1, 2, 3])), ((ten, 4, '1', False), (10, 3, [1, 2, 3])), ) for params, output in tests: self.check_paginator(params, output) def test_invalid_page_number(self): """ Invalid page numbers result in the correct exception being raised. """ paginator = Paginator([1, 2, 3], 2) with self.assertRaises(InvalidPage): paginator.page(3) with self.assertRaises(PageNotAnInteger): paginator.validate_number(None) with self.assertRaises(PageNotAnInteger): paginator.validate_number('x') with self.assertRaises(PageNotAnInteger): paginator.validate_number(1.2) def test_float_integer_page(self): paginator = Paginator([1, 2, 3], 2) self.assertEqual(paginator.validate_number(1.0), 1) def test_no_content_allow_empty_first_page(self): # With no content and allow_empty_first_page=True, 1 is a valid page number paginator = Paginator([], 2) self.assertEqual(paginator.validate_number(1), 1) def test_paginate_misc_classes(self): class CountContainer: def count(self): return 42 # Paginator can be passed other objects with a count() method. paginator = Paginator(CountContainer(), 10) self.assertEqual(42, paginator.count) self.assertEqual(5, paginator.num_pages) self.assertEqual([1, 2, 3, 4, 5], list(paginator.page_range)) # Paginator can be passed other objects that implement __len__. class LenContainer: def __len__(self): return 42 paginator = Paginator(LenContainer(), 10) self.assertEqual(42, paginator.count) self.assertEqual(5, paginator.num_pages) self.assertEqual([1, 2, 3, 4, 5], list(paginator.page_range)) def test_count_does_not_silence_attribute_error(self): class AttributeErrorContainer: def count(self): raise AttributeError('abc') with self.assertRaisesMessage(AttributeError, 'abc'): Paginator(AttributeErrorContainer(), 10).count() def test_count_does_not_silence_type_error(self): class TypeErrorContainer: def count(self): raise TypeError('abc') with self.assertRaisesMessage(TypeError, 'abc'): Paginator(TypeErrorContainer(), 10).count() def check_indexes(self, params, page_num, indexes): """ Helper method that instantiates a Paginator object from the passed params and then checks that the start and end indexes of the passed page_num match those given as a 2-tuple in indexes. """ paginator = Paginator(*params) if page_num == 'first': page_num = 1 elif page_num == 'last': page_num = paginator.num_pages page = paginator.page(page_num) start, end = indexes msg = ("For %s of page %s, expected %s but got %s. Paginator parameters were: %s") self.assertEqual(start, page.start_index(), msg % ('start index', page_num, start, page.start_index(), params)) self.assertEqual(end, page.end_index(), msg % ('end index', page_num, end, page.end_index(), params)) def test_page_indexes(self): """ Paginator pages have the correct start and end indexes. """ ten = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] tests = ( # Each item is three tuples: # First tuple is Paginator parameters - object_list, per_page, # orphans, and allow_empty_first_page. # Second tuple is the start and end indexes of the first page. # Third tuple is the start and end indexes of the last page. # Ten items, varying per_page, no orphans. 
((ten, 1, 0, True), (1, 1), (10, 10)), ((ten, 2, 0, True), (1, 2), (9, 10)), ((ten, 3, 0, True), (1, 3), (10, 10)), ((ten, 5, 0, True), (1, 5), (6, 10)), # Ten items, varying per_page, with orphans. ((ten, 1, 1, True), (1, 1), (9, 10)), ((ten, 1, 2, True), (1, 1), (8, 10)), ((ten, 3, 1, True), (1, 3), (7, 10)), ((ten, 3, 2, True), (1, 3), (7, 10)), ((ten, 3, 4, True), (1, 3), (4, 10)), ((ten, 5, 1, True), (1, 5), (6, 10)), ((ten, 5, 2, True), (1, 5), (6, 10)), ((ten, 5, 5, True), (1, 10), (1, 10)), # One item, varying orphans, no empty first page. (([1], 4, 0, False), (1, 1), (1, 1)), (([1], 4, 1, False), (1, 1), (1, 1)), (([1], 4, 2, False), (1, 1), (1, 1)), # One item, varying orphans, allow empty first page. (([1], 4, 0, True), (1, 1), (1, 1)), (([1], 4, 1, True), (1, 1), (1, 1)), (([1], 4, 2, True), (1, 1), (1, 1)), # Zero items, varying orphans, allow empty first page. (([], 4, 0, True), (0, 0), (0, 0)), (([], 4, 1, True), (0, 0), (0, 0)), (([], 4, 2, True), (0, 0), (0, 0)), ) for params, first, last in tests: self.check_indexes(params, 'first', first) self.check_indexes(params, 'last', last) # When no items and no empty first page, we should get EmptyPage error. with self.assertRaises(EmptyPage): self.check_indexes(([], 4, 0, False), 1, None) with self.assertRaises(EmptyPage): self.check_indexes(([], 4, 1, False), 1, None) with self.assertRaises(EmptyPage): self.check_indexes(([], 4, 2, False), 1, None) def test_page_sequence(self): """ A paginator page acts like a standard sequence. """ eleven = 'abcdefghijk' page2 = Paginator(eleven, per_page=5, orphans=1).page(2) self.assertEqual(len(page2), 6) self.assertIn('k', page2) self.assertNotIn('a', page2) self.assertEqual(''.join(page2), 'fghijk') self.assertEqual(''.join(reversed(page2)), 'kjihgf') def test_get_page_hook(self): """ A Paginator subclass can use the ``_get_page`` hook to return an alternative to the standard Page class. """ eleven = 'abcdefghijk' paginator = ValidAdjacentNumsPaginator(eleven, per_page=6) page1 = paginator.page(1) page2 = paginator.page(2) self.assertIsNone(page1.previous_page_number()) self.assertEqual(page1.next_page_number(), 2) self.assertEqual(page2.previous_page_number(), 1) self.assertIsNone(page2.next_page_number()) def test_page_range_iterator(self): """ Paginator.page_range should be an iterator. """ self.assertIsInstance(Paginator([1, 2, 3], 2).page_range, type(range(0))) def test_get_page(self): """ Paginator.get_page() returns a valid page even with invalid page arguments. """ paginator = Paginator([1, 2, 3], 2) page = paginator.get_page(1) self.assertEqual(page.number, 1) self.assertEqual(page.object_list, [1, 2]) # An empty page returns the last page. self.assertEqual(paginator.get_page(3).number, 2) # Non-integer page returns the first page. self.assertEqual(paginator.get_page(None).number, 1) def test_get_page_empty_object_list(self): """Paginator.get_page() with an empty object_list.""" paginator = Paginator([], 2) # An empty page returns the last page. self.assertEqual(paginator.get_page(1).number, 1) self.assertEqual(paginator.get_page(2).number, 1) # Non-integer page returns the first page. self.assertEqual(paginator.get_page(None).number, 1) def test_get_page_empty_object_list_and_allow_empty_first_page_false(self): """ Paginator.get_page() raises EmptyPage if allow_empty_first_page=False and object_list is empty. 
""" paginator = Paginator([], 2, allow_empty_first_page=False) with self.assertRaises(EmptyPage): paginator.get_page(1) def test_querysetpaginator_deprecation(self): msg = 'The QuerySetPaginator alias of Paginator is deprecated.' with self.assertWarnsMessage(RemovedInDjango31Warning, msg) as cm: QuerySetPaginator([], 1) self.assertEqual(cm.filename, __file__) class ModelPaginationTests(TestCase): """ Test pagination with Django model instances """ @classmethod def setUpTestData(cls): # Prepare a list of objects for pagination. for x in range(1, 10): a = Article(headline='Article %s' % x, pub_date=datetime(2005, 7, 29)) a.save() def test_first_page(self): paginator = Paginator(Article.objects.order_by('id'), 5) p = paginator.page(1) self.assertEqual("<Page 1 of 2>", str(p)) self.assertQuerysetEqual(p.object_list, [ "<Article: Article 1>", "<Article: Article 2>", "<Article: Article 3>", "<Article: Article 4>", "<Article: Article 5>" ]) self.assertTrue(p.has_next()) self.assertFalse(p.has_previous()) self.assertTrue(p.has_other_pages()) self.assertEqual(2, p.next_page_number()) with self.assertRaises(InvalidPage): p.previous_page_number() self.assertEqual(1, p.start_index()) self.assertEqual(5, p.end_index()) def test_last_page(self): paginator = Paginator(Article.objects.order_by('id'), 5) p = paginator.page(2) self.assertEqual("<Page 2 of 2>", str(p)) self.assertQuerysetEqual(p.object_list, [ "<Article: Article 6>", "<Article: Article 7>", "<Article: Article 8>", "<Article: Article 9>" ]) self.assertFalse(p.has_next()) self.assertTrue(p.has_previous()) self.assertTrue(p.has_other_pages()) with self.assertRaises(InvalidPage): p.next_page_number() self.assertEqual(1, p.previous_page_number()) self.assertEqual(6, p.start_index()) self.assertEqual(9, p.end_index()) def test_page_getitem(self): """ Tests proper behavior of a paginator page __getitem__ (queryset evaluation, slicing, exception raised). """ paginator = Paginator(Article.objects.order_by('id'), 5) p = paginator.page(1) # Make sure object_list queryset is not evaluated by an invalid __getitem__ call. # (this happens from the template engine when using eg: {% page_obj.has_previous %}) self.assertIsNone(p.object_list._result_cache) msg = 'Page indices must be integers or slices, not str.' with self.assertRaisesMessage(TypeError, msg): p['has_previous'] self.assertIsNone(p.object_list._result_cache) self.assertNotIsInstance(p.object_list, list) # Make sure slicing the Page object with numbers and slice objects work. self.assertEqual(p[0], Article.objects.get(headline='Article 1')) self.assertQuerysetEqual(p[slice(2)], [ "<Article: Article 1>", "<Article: Article 2>", ] ) # After __getitem__ is called, object_list is a list self.assertIsInstance(p.object_list, list) def test_paginating_unordered_queryset_raises_warning(self): msg = ( "Pagination may yield inconsistent results with an unordered " "object_list: <class 'pagination.models.Article'> QuerySet." ) with self.assertWarnsMessage(UnorderedObjectListWarning, msg) as cm: Paginator(Article.objects.all(), 5) # The warning points at the Paginator caller (i.e. the stacklevel # is appropriate). self.assertEqual(cm.filename, __file__) def test_paginating_empty_queryset_does_not_warn(self): with warnings.catch_warnings(record=True) as recorded: Paginator(Article.objects.none(), 5) self.assertEqual(len(recorded), 0) def test_paginating_unordered_object_list_raises_warning(self): """ Unordered object list warning with an object that has an ordered attribute but not a model attribute. 
""" class ObjectList: ordered = False object_list = ObjectList() msg = ( "Pagination may yield inconsistent results with an unordered " "object_list: {!r}.".format(object_list) ) with self.assertWarnsMessage(UnorderedObjectListWarning, msg): Paginator(object_list, 5)
import datetime import pickle import unittest import uuid from copy import deepcopy from django.core.exceptions import FieldError from django.db import DatabaseError, connection, models from django.db.models import CharField, Q, TimeField, UUIDField from django.db.models.aggregates import ( Avg, Count, Max, Min, StdDev, Sum, Variance, ) from django.db.models.expressions import ( Case, Col, Combinable, Exists, Expression, ExpressionList, ExpressionWrapper, F, Func, OrderBy, OuterRef, Random, RawSQL, Ref, Subquery, Value, When, ) from django.db.models.functions import ( Coalesce, Concat, Length, Lower, Substr, Upper, ) from django.db.models.sql import constants from django.db.models.sql.datastructures import Join from django.test import SimpleTestCase, TestCase, skipUnlessDBFeature from django.test.utils import Approximate, isolate_apps from .models import ( UUID, UUIDPK, Company, Employee, Experiment, Number, RemoteEmployee, Result, SimulationRun, Time, ) class BasicExpressionsTests(TestCase): @classmethod def setUpTestData(cls): cls.example_inc = Company.objects.create( name="Example Inc.", num_employees=2300, num_chairs=5, ceo=Employee.objects.create(firstname="Joe", lastname="Smith", salary=10) ) cls.foobar_ltd = Company.objects.create( name="Foobar Ltd.", num_employees=3, num_chairs=4, ceo=Employee.objects.create(firstname="Frank", lastname="Meyer", salary=20) ) cls.max = Employee.objects.create(firstname='Max', lastname='Mustermann', salary=30) cls.gmbh = Company.objects.create(name='Test GmbH', num_employees=32, num_chairs=1, ceo=cls.max) def setUp(self): self.company_query = Company.objects.values( "name", "num_employees", "num_chairs" ).order_by( "name", "num_employees", "num_chairs" ) def test_annotate_values_aggregate(self): companies = Company.objects.annotate( salaries=F('ceo__salary'), ).values('num_employees', 'salaries').aggregate( result=Sum( F('salaries') + F('num_employees'), output_field=models.IntegerField() ), ) self.assertEqual(companies['result'], 2395) def test_annotate_values_filter(self): companies = Company.objects.annotate( foo=RawSQL('%s', ['value']), ).filter(foo='value').order_by('name') self.assertQuerysetEqual( companies, ['<Company: Example Inc.>', '<Company: Foobar Ltd.>', '<Company: Test GmbH>'], ) def test_annotate_values_count(self): companies = Company.objects.annotate(foo=RawSQL('%s', ['value'])) self.assertEqual(companies.count(), 3) @unittest.skipIf(connection.vendor == 'oracle', "Oracle doesn't support using boolean type in SELECT") def test_filtering_on_annotate_that_uses_q(self): self.assertEqual( Company.objects.annotate( num_employees_check=ExpressionWrapper(Q(num_employees__gt=3), output_field=models.BooleanField()) ).filter(num_employees_check=True).count(), 2, ) def test_filter_inter_attribute(self): # We can filter on attribute relationships on same model obj, e.g. # find companies where the number of employees is greater # than the number of chairs. 
self.assertSequenceEqual( self.company_query.filter(num_employees__gt=F("num_chairs")), [ { "num_chairs": 5, "name": "Example Inc.", "num_employees": 2300, }, { "num_chairs": 1, "name": "Test GmbH", "num_employees": 32 }, ], ) def test_update(self): # We can set one field to have the value of another field # Make sure we have enough chairs self.company_query.update(num_chairs=F("num_employees")) self.assertSequenceEqual( self.company_query, [ { "num_chairs": 2300, "name": "Example Inc.", "num_employees": 2300 }, { "num_chairs": 3, "name": "Foobar Ltd.", "num_employees": 3 }, { "num_chairs": 32, "name": "Test GmbH", "num_employees": 32 } ], ) def test_arithmetic(self): # We can perform arithmetic operations in expressions # Make sure we have 2 spare chairs self.company_query.update(num_chairs=F("num_employees") + 2) self.assertSequenceEqual( self.company_query, [ { 'num_chairs': 2302, 'name': 'Example Inc.', 'num_employees': 2300 }, { 'num_chairs': 5, 'name': 'Foobar Ltd.', 'num_employees': 3 }, { 'num_chairs': 34, 'name': 'Test GmbH', 'num_employees': 32 } ], ) def test_order_of_operations(self): # Law of order of operations is followed self.company_query.update(num_chairs=F('num_employees') + 2 * F('num_employees')) self.assertSequenceEqual( self.company_query, [ { 'num_chairs': 6900, 'name': 'Example Inc.', 'num_employees': 2300 }, { 'num_chairs': 9, 'name': 'Foobar Ltd.', 'num_employees': 3 }, { 'num_chairs': 96, 'name': 'Test GmbH', 'num_employees': 32 } ], ) def test_parenthesis_priority(self): # Law of order of operations can be overridden by parentheses self.company_query.update(num_chairs=(F('num_employees') + 2) * F('num_employees')) self.assertSequenceEqual( self.company_query, [ { 'num_chairs': 5294600, 'name': 'Example Inc.', 'num_employees': 2300 }, { 'num_chairs': 15, 'name': 'Foobar Ltd.', 'num_employees': 3 }, { 'num_chairs': 1088, 'name': 'Test GmbH', 'num_employees': 32 } ], ) def test_update_with_fk(self): # ForeignKey can become updated with the value of another ForeignKey. 
self.assertEqual(Company.objects.update(point_of_contact=F('ceo')), 3) self.assertQuerysetEqual( Company.objects.all(), ['Joe Smith', 'Frank Meyer', 'Max Mustermann'], lambda c: str(c.point_of_contact), ordered=False ) def test_update_with_none(self): Number.objects.create(integer=1, float=1.0) Number.objects.create(integer=2) Number.objects.filter(float__isnull=False).update(float=Value(None)) self.assertQuerysetEqual( Number.objects.all(), [None, None], lambda n: n.float, ordered=False ) def test_filter_with_join(self): # F Expressions can also span joins Company.objects.update(point_of_contact=F('ceo')) c = Company.objects.first() c.point_of_contact = Employee.objects.create(firstname="Guido", lastname="van Rossum") c.save() self.assertQuerysetEqual( Company.objects.filter(ceo__firstname=F('point_of_contact__firstname')), ['Foobar Ltd.', 'Test GmbH'], lambda c: c.name, ordered=False ) Company.objects.exclude( ceo__firstname=F("point_of_contact__firstname") ).update(name="foo") self.assertEqual( Company.objects.exclude( ceo__firstname=F('point_of_contact__firstname') ).get().name, "foo", ) msg = "Joined field references are not permitted in this query" with self.assertRaisesMessage(FieldError, msg): Company.objects.exclude( ceo__firstname=F('point_of_contact__firstname') ).update(name=F('point_of_contact__lastname')) def test_object_update(self): # F expressions can be used to update attributes on single objects self.gmbh.num_employees = F('num_employees') + 4 self.gmbh.save() self.gmbh.refresh_from_db() self.assertEqual(self.gmbh.num_employees, 36) def test_new_object_save(self): # We should be able to use Funcs when inserting new data test_co = Company(name=Lower(Value('UPPER')), num_employees=32, num_chairs=1, ceo=self.max) test_co.save() test_co.refresh_from_db() self.assertEqual(test_co.name, "upper") def test_new_object_create(self): test_co = Company.objects.create(name=Lower(Value('UPPER')), num_employees=32, num_chairs=1, ceo=self.max) test_co.refresh_from_db() self.assertEqual(test_co.name, "upper") def test_object_create_with_aggregate(self): # Aggregates are not allowed when inserting new data msg = 'Aggregate functions are not allowed in this query (num_employees=Max(Value(1))).' with self.assertRaisesMessage(FieldError, msg): Company.objects.create( name='Company', num_employees=Max(Value(1)), num_chairs=1, ceo=Employee.objects.create(firstname="Just", lastname="Doit", salary=30), ) def test_object_update_fk(self): # F expressions cannot be used to update attributes which are foreign # keys, or attributes which involve joins. test_gmbh = Company.objects.get(pk=self.gmbh.pk) msg = 'F(ceo)": "Company.point_of_contact" must be a "Employee" instance.' 
with self.assertRaisesMessage(ValueError, msg): test_gmbh.point_of_contact = F('ceo') test_gmbh.point_of_contact = self.gmbh.ceo test_gmbh.save() test_gmbh.name = F('ceo__lastname') msg = 'Joined field references are not permitted in this query' with self.assertRaisesMessage(FieldError, msg): test_gmbh.save() def test_update_inherited_field_value(self): msg = 'Joined field references are not permitted in this query' with self.assertRaisesMessage(FieldError, msg): RemoteEmployee.objects.update(adjusted_salary=F('salary') * 5) def test_object_update_unsaved_objects(self): # F expressions cannot be used to update attributes on objects which do # not yet exist in the database acme = Company(name='The Acme Widget Co.', num_employees=12, num_chairs=5, ceo=self.max) acme.num_employees = F("num_employees") + 16 msg = ( 'Failed to insert expression "Col(expressions_company, ' 'expressions.Company.num_employees) + Value(16)" on ' 'expressions.Company.num_employees. F() expressions can only be ' 'used to update, not to insert.' ) with self.assertRaisesMessage(ValueError, msg): acme.save() acme.num_employees = 12 acme.name = Lower(F('name')) msg = ( 'Failed to insert expression "Lower(Col(expressions_company, ' 'expressions.Company.name))" on expressions.Company.name. F() ' 'expressions can only be used to update, not to insert.' ) with self.assertRaisesMessage(ValueError, msg): acme.save() def test_ticket_11722_iexact_lookup(self): Employee.objects.create(firstname="John", lastname="Doe") Employee.objects.create(firstname="Test", lastname="test") queryset = Employee.objects.filter(firstname__iexact=F('lastname')) self.assertQuerysetEqual(queryset, ["<Employee: Test test>"]) def test_ticket_16731_startswith_lookup(self): Employee.objects.create(firstname="John", lastname="Doe") e2 = Employee.objects.create(firstname="Jack", lastname="Jackson") e3 = Employee.objects.create(firstname="Jack", lastname="jackson") self.assertSequenceEqual( Employee.objects.filter(lastname__startswith=F('firstname')), [e2, e3] if connection.features.has_case_insensitive_like else [e2] ) qs = Employee.objects.filter(lastname__istartswith=F('firstname')).order_by('pk') self.assertSequenceEqual(qs, [e2, e3]) def test_ticket_18375_join_reuse(self): # Reverse multijoin F() references and the lookup target the same join. # Pre #18375 the F() join was generated first and the lookup couldn't # reuse that join. qs = Employee.objects.filter(company_ceo_set__num_chairs=F('company_ceo_set__num_employees')) self.assertEqual(str(qs.query).count('JOIN'), 1) def test_ticket_18375_kwarg_ordering(self): # The next query was dict-randomization dependent - if the "gte=1" # was seen first, then the F() will reuse the join generated by the # gte lookup, if F() was seen first, then it generated a join the # other lookups could not reuse. qs = Employee.objects.filter( company_ceo_set__num_chairs=F('company_ceo_set__num_employees'), company_ceo_set__num_chairs__gte=1, ) self.assertEqual(str(qs.query).count('JOIN'), 1) def test_ticket_18375_kwarg_ordering_2(self): # Another similar case for F() than above. Now we have the same join # in two filter kwargs, one in the lhs lookup, one in F. Here pre # #18375 the amount of joins generated was random if dict # randomization was enabled, that is the generated query dependent # on which clause was seen first. 
qs = Employee.objects.filter( company_ceo_set__num_employees=F('pk'), pk=F('company_ceo_set__num_employees') ) self.assertEqual(str(qs.query).count('JOIN'), 1) def test_ticket_18375_chained_filters(self): # F() expressions do not reuse joins from previous filter. qs = Employee.objects.filter( company_ceo_set__num_employees=F('pk') ).filter( company_ceo_set__num_employees=F('company_ceo_set__num_employees') ) self.assertEqual(str(qs.query).count('JOIN'), 2) def test_order_by_exists(self): mary = Employee.objects.create(firstname='Mary', lastname='Mustermann', salary=20) mustermanns_by_seniority = Employee.objects.filter(lastname='Mustermann').order_by( # Order by whether the employee is the CEO of a company Exists(Company.objects.filter(ceo=OuterRef('pk'))).desc() ) self.assertSequenceEqual(mustermanns_by_seniority, [self.max, mary]) def test_order_by_multiline_sql(self): raw_order_by = ( RawSQL(''' CASE WHEN num_employees > 1000 THEN num_chairs ELSE 0 END ''', []).desc(), RawSQL(''' CASE WHEN num_chairs > 1 THEN 1 ELSE 0 END ''', []).asc() ) for qs in ( Company.objects.all(), Company.objects.distinct(), ): with self.subTest(qs=qs): self.assertSequenceEqual( qs.order_by(*raw_order_by), [self.example_inc, self.gmbh, self.foobar_ltd], ) def test_outerref(self): inner = Company.objects.filter(point_of_contact=OuterRef('pk')) msg = ( 'This queryset contains a reference to an outer query and may only ' 'be used in a subquery.' ) with self.assertRaisesMessage(ValueError, msg): inner.exists() outer = Employee.objects.annotate(is_point_of_contact=Exists(inner)) self.assertIs(outer.exists(), True) def test_exist_single_field_output_field(self): queryset = Company.objects.values('pk') self.assertIsInstance(Exists(queryset).output_field, models.BooleanField) def test_subquery(self): Company.objects.filter(name='Example Inc.').update( point_of_contact=Employee.objects.get(firstname='Joe', lastname='Smith'), ceo=self.max, ) Employee.objects.create(firstname='Bob', lastname='Brown', salary=40) qs = Employee.objects.annotate( is_point_of_contact=Exists(Company.objects.filter(point_of_contact=OuterRef('pk'))), is_not_point_of_contact=~Exists(Company.objects.filter(point_of_contact=OuterRef('pk'))), is_ceo_of_small_company=Exists(Company.objects.filter(num_employees__lt=200, ceo=OuterRef('pk'))), is_ceo_small_2=~~Exists(Company.objects.filter(num_employees__lt=200, ceo=OuterRef('pk'))), largest_company=Subquery(Company.objects.order_by('-num_employees').filter( models.Q(ceo=OuterRef('pk')) | models.Q(point_of_contact=OuterRef('pk')) ).values('name')[:1], output_field=models.CharField()) ).values( 'firstname', 'is_point_of_contact', 'is_not_point_of_contact', 'is_ceo_of_small_company', 'is_ceo_small_2', 'largest_company', ).order_by('firstname') results = list(qs) # Could use Coalesce(subq, Value('')) instead except for the bug in # cx_Oracle mentioned in #23843. 
bob = results[0] if bob['largest_company'] == '' and connection.features.interprets_empty_strings_as_nulls: bob['largest_company'] = None self.assertEqual(results, [ { 'firstname': 'Bob', 'is_point_of_contact': False, 'is_not_point_of_contact': True, 'is_ceo_of_small_company': False, 'is_ceo_small_2': False, 'largest_company': None, }, { 'firstname': 'Frank', 'is_point_of_contact': False, 'is_not_point_of_contact': True, 'is_ceo_of_small_company': True, 'is_ceo_small_2': True, 'largest_company': 'Foobar Ltd.', }, { 'firstname': 'Joe', 'is_point_of_contact': True, 'is_not_point_of_contact': False, 'is_ceo_of_small_company': False, 'is_ceo_small_2': False, 'largest_company': 'Example Inc.', }, { 'firstname': 'Max', 'is_point_of_contact': False, 'is_not_point_of_contact': True, 'is_ceo_of_small_company': True, 'is_ceo_small_2': True, 'largest_company': 'Example Inc.' } ]) # A less elegant way to write the same query: this uses a LEFT OUTER # JOIN and an IS NULL, inside a WHERE NOT IN which is probably less # efficient than EXISTS. self.assertCountEqual( qs.filter(is_point_of_contact=True).values('pk'), Employee.objects.exclude(company_point_of_contact_set=None).values('pk') ) def test_in_subquery(self): # This is a contrived test (and you really wouldn't write this query), # but it is a succinct way to test the __in=Subquery() construct. small_companies = Company.objects.filter(num_employees__lt=200).values('pk') subquery_test = Company.objects.filter(pk__in=Subquery(small_companies)) self.assertCountEqual(subquery_test, [self.foobar_ltd, self.gmbh]) subquery_test2 = Company.objects.filter(pk=Subquery(small_companies.filter(num_employees=3))) self.assertCountEqual(subquery_test2, [self.foobar_ltd]) def test_uuid_pk_subquery(self): u = UUIDPK.objects.create() UUID.objects.create(uuid_fk=u) qs = UUIDPK.objects.filter(id__in=Subquery(UUID.objects.values('uuid_fk__id'))) self.assertCountEqual(qs, [u]) def test_nested_subquery(self): inner = Company.objects.filter(point_of_contact=OuterRef('pk')) outer = Employee.objects.annotate(is_point_of_contact=Exists(inner)) contrived = Employee.objects.annotate( is_point_of_contact=Subquery( outer.filter(pk=OuterRef('pk')).values('is_point_of_contact'), output_field=models.BooleanField(), ), ) self.assertCountEqual(contrived.values_list(), outer.values_list()) def test_nested_subquery_outer_ref_2(self): first = Time.objects.create(time='09:00') second = Time.objects.create(time='17:00') third = Time.objects.create(time='21:00') SimulationRun.objects.bulk_create([ SimulationRun(start=first, end=second, midpoint='12:00'), SimulationRun(start=first, end=third, midpoint='15:00'), SimulationRun(start=second, end=first, midpoint='00:00'), ]) inner = Time.objects.filter(time=OuterRef(OuterRef('time')), pk=OuterRef('start')).values('time') middle = SimulationRun.objects.annotate(other=Subquery(inner)).values('other')[:1] outer = Time.objects.annotate(other=Subquery(middle, output_field=models.TimeField())) # This is a contrived example. It exercises the double OuterRef form. 
self.assertCountEqual(outer, [first, second, third]) def test_nested_subquery_outer_ref_with_autofield(self): first = Time.objects.create(time='09:00') second = Time.objects.create(time='17:00') SimulationRun.objects.create(start=first, end=second, midpoint='12:00') inner = SimulationRun.objects.filter(start=OuterRef(OuterRef('pk'))).values('start') middle = Time.objects.annotate(other=Subquery(inner)).values('other')[:1] outer = Time.objects.annotate(other=Subquery(middle, output_field=models.IntegerField())) # This exercises the double OuterRef form with AutoField as pk. self.assertCountEqual(outer, [first, second]) def test_annotations_within_subquery(self): Company.objects.filter(num_employees__lt=50).update(ceo=Employee.objects.get(firstname='Frank')) inner = Company.objects.filter( ceo=OuterRef('pk') ).values('ceo').annotate(total_employees=models.Sum('num_employees')).values('total_employees') outer = Employee.objects.annotate(total_employees=Subquery(inner)).filter(salary__lte=Subquery(inner)) self.assertSequenceEqual( outer.order_by('-total_employees').values('salary', 'total_employees'), [{'salary': 10, 'total_employees': 2300}, {'salary': 20, 'total_employees': 35}], ) def test_subquery_references_joined_table_twice(self): inner = Company.objects.filter( num_chairs__gte=OuterRef('ceo__salary'), num_employees__gte=OuterRef('point_of_contact__salary'), ) # Another contrived example (there is no need to have a subquery here) outer = Company.objects.filter(pk__in=Subquery(inner.values('pk'))) self.assertFalse(outer.exists()) def test_subquery_filter_by_aggregate(self): Number.objects.create(integer=1000, float=1.2) Employee.objects.create(salary=1000) qs = Number.objects.annotate( min_valuable_count=Subquery( Employee.objects.filter( salary=OuterRef('integer'), ).annotate(cnt=Count('salary')).filter(cnt__gt=0).values('cnt')[:1] ), ) self.assertEqual(qs.get().float, 1.2) def test_aggregate_subquery_annotation(self): with self.assertNumQueries(1) as ctx: aggregate = Company.objects.annotate( ceo_salary=Subquery( Employee.objects.filter( id=OuterRef('ceo_id'), ).values('salary') ), ).aggregate( ceo_salary_gt_20=Count('pk', filter=Q(ceo_salary__gt=20)), ) self.assertEqual(aggregate, {'ceo_salary_gt_20': 1}) # Aggregation over a subquery annotation doesn't annotate the subquery # twice in the inner query. sql = ctx.captured_queries[0]['sql'] self.assertLessEqual(sql.count('SELECT'), 3) # GROUP BY isn't required to aggregate over a query that doesn't # contain nested aggregates. 
self.assertNotIn('GROUP BY', sql) def test_explicit_output_field(self): class FuncA(Func): output_field = models.CharField() class FuncB(Func): pass expr = FuncB(FuncA()) self.assertEqual(expr.output_field, FuncA.output_field) def test_outerref_mixed_case_table_name(self): inner = Result.objects.filter(result_time__gte=OuterRef('experiment__assigned')) outer = Result.objects.filter(pk__in=Subquery(inner.values('pk'))) self.assertFalse(outer.exists()) def test_outerref_with_operator(self): inner = Company.objects.filter(num_employees=OuterRef('ceo__salary') + 2) outer = Company.objects.filter(pk__in=Subquery(inner.values('pk'))) self.assertEqual(outer.get().name, 'Test GmbH') def test_annotation_with_outerref(self): gmbh_salary = Company.objects.annotate( max_ceo_salary_raise=Subquery( Company.objects.annotate( salary_raise=OuterRef('num_employees') + F('num_employees'), ).order_by('-salary_raise').values('salary_raise')[:1], output_field=models.IntegerField(), ), ).get(pk=self.gmbh.pk) self.assertEqual(gmbh_salary.max_ceo_salary_raise, 2332) def test_pickle_expression(self): expr = Value(1, output_field=models.IntegerField()) expr.convert_value # populate cached property self.assertEqual(pickle.loads(pickle.dumps(expr)), expr) def test_incorrect_field_in_F_expression(self): with self.assertRaisesMessage(FieldError, "Cannot resolve keyword 'nope' into field."): list(Employee.objects.filter(firstname=F('nope'))) def test_incorrect_joined_field_in_F_expression(self): with self.assertRaisesMessage(FieldError, "Cannot resolve keyword 'nope' into field."): list(Company.objects.filter(ceo__pk=F('point_of_contact__nope'))) class IterableLookupInnerExpressionsTests(TestCase): @classmethod def setUpTestData(cls): ceo = Employee.objects.create(firstname='Just', lastname='Doit', salary=30) # MySQL requires that the values calculated for expressions don't pass # outside of the field's range, so it's inconvenient to use the values # in the more general tests. Company.objects.create(name='5020 Ltd', num_employees=50, num_chairs=20, ceo=ceo) Company.objects.create(name='5040 Ltd', num_employees=50, num_chairs=40, ceo=ceo) Company.objects.create(name='5050 Ltd', num_employees=50, num_chairs=50, ceo=ceo) Company.objects.create(name='5060 Ltd', num_employees=50, num_chairs=60, ceo=ceo) Company.objects.create(name='99300 Ltd', num_employees=99, num_chairs=300, ceo=ceo) def test_in_lookup_allows_F_expressions_and_expressions_for_integers(self): # __in lookups can use F() expressions for integers. 
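        # Each member of the iterable may itself be an expression; such
        # members are compiled individually, before the main query is
        # compiled (the SQL-injection test below relies on exactly that
        # behavior).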
queryset = Company.objects.filter(num_employees__in=([F('num_chairs') - 10])) self.assertQuerysetEqual(queryset, ['<Company: 5060 Ltd>'], ordered=False) self.assertQuerysetEqual( Company.objects.filter(num_employees__in=([F('num_chairs') - 10, F('num_chairs') + 10])), ['<Company: 5040 Ltd>', '<Company: 5060 Ltd>'], ordered=False ) self.assertQuerysetEqual( Company.objects.filter( num_employees__in=([F('num_chairs') - 10, F('num_chairs'), F('num_chairs') + 10]) ), ['<Company: 5040 Ltd>', '<Company: 5050 Ltd>', '<Company: 5060 Ltd>'], ordered=False ) def test_expressions_in_lookups_join_choice(self): midpoint = datetime.time(13, 0) t1 = Time.objects.create(time=datetime.time(12, 0)) t2 = Time.objects.create(time=datetime.time(14, 0)) SimulationRun.objects.create(start=t1, end=t2, midpoint=midpoint) SimulationRun.objects.create(start=t1, end=None, midpoint=midpoint) SimulationRun.objects.create(start=None, end=t2, midpoint=midpoint) SimulationRun.objects.create(start=None, end=None, midpoint=midpoint) queryset = SimulationRun.objects.filter(midpoint__range=[F('start__time'), F('end__time')]) self.assertQuerysetEqual( queryset, ['<SimulationRun: 13:00:00 (12:00:00 to 14:00:00)>'], ordered=False ) for alias in queryset.query.alias_map.values(): if isinstance(alias, Join): self.assertEqual(alias.join_type, constants.INNER) queryset = SimulationRun.objects.exclude(midpoint__range=[F('start__time'), F('end__time')]) self.assertQuerysetEqual(queryset, [], ordered=False) for alias in queryset.query.alias_map.values(): if isinstance(alias, Join): self.assertEqual(alias.join_type, constants.LOUTER) def test_range_lookup_allows_F_expressions_and_expressions_for_integers(self): # Range lookups can use F() expressions for integers. Company.objects.filter(num_employees__exact=F("num_chairs")) self.assertQuerysetEqual( Company.objects.filter(num_employees__range=(F('num_chairs'), 100)), ['<Company: 5020 Ltd>', '<Company: 5040 Ltd>', '<Company: 5050 Ltd>'], ordered=False ) self.assertQuerysetEqual( Company.objects.filter(num_employees__range=(F('num_chairs') - 10, F('num_chairs') + 10)), ['<Company: 5040 Ltd>', '<Company: 5050 Ltd>', '<Company: 5060 Ltd>'], ordered=False ) self.assertQuerysetEqual( Company.objects.filter(num_employees__range=(F('num_chairs') - 10, 100)), ['<Company: 5020 Ltd>', '<Company: 5040 Ltd>', '<Company: 5050 Ltd>', '<Company: 5060 Ltd>'], ordered=False ) self.assertQuerysetEqual( Company.objects.filter(num_employees__range=(1, 100)), [ '<Company: 5020 Ltd>', '<Company: 5040 Ltd>', '<Company: 5050 Ltd>', '<Company: 5060 Ltd>', '<Company: 99300 Ltd>', ], ordered=False ) @unittest.skipUnless(connection.vendor == 'sqlite', "This defensive test only works on databases that don't validate parameter types") def test_complex_expressions_do_not_introduce_sql_injection_via_untrusted_string_inclusion(self): """ This tests that SQL injection isn't possible using compilation of expressions in iterable filters, as their compilation happens before the main query compilation. It's limited to SQLite, as PostgreSQL, Oracle and other vendors have defense in depth against this by type checking. Testing against SQLite (the most permissive of the built-in databases) demonstrates that the problem doesn't exist while keeping the test simple. 
""" queryset = Company.objects.filter(name__in=[F('num_chairs') + '1)) OR ((1==1']) self.assertQuerysetEqual(queryset, [], ordered=False) def test_in_lookup_allows_F_expressions_and_expressions_for_datetimes(self): start = datetime.datetime(2016, 2, 3, 15, 0, 0) end = datetime.datetime(2016, 2, 5, 15, 0, 0) experiment_1 = Experiment.objects.create( name='Integrity testing', assigned=start.date(), start=start, end=end, completed=end.date(), estimated_time=end - start, ) experiment_2 = Experiment.objects.create( name='Taste testing', assigned=start.date(), start=start, end=end, completed=end.date(), estimated_time=end - start, ) Result.objects.create( experiment=experiment_1, result_time=datetime.datetime(2016, 2, 4, 15, 0, 0), ) Result.objects.create( experiment=experiment_1, result_time=datetime.datetime(2016, 3, 10, 2, 0, 0), ) Result.objects.create( experiment=experiment_2, result_time=datetime.datetime(2016, 1, 8, 5, 0, 0), ) within_experiment_time = [F('experiment__start'), F('experiment__end')] queryset = Result.objects.filter(result_time__range=within_experiment_time) self.assertQuerysetEqual(queryset, ["<Result: Result at 2016-02-04 15:00:00>"]) within_experiment_time = [F('experiment__start'), F('experiment__end')] queryset = Result.objects.filter(result_time__range=within_experiment_time) self.assertQuerysetEqual(queryset, ["<Result: Result at 2016-02-04 15:00:00>"]) class FTests(SimpleTestCase): def test_deepcopy(self): f = F("foo") g = deepcopy(f) self.assertEqual(f.name, g.name) def test_deconstruct(self): f = F('name') path, args, kwargs = f.deconstruct() self.assertEqual(path, 'django.db.models.expressions.F') self.assertEqual(args, (f.name,)) self.assertEqual(kwargs, {}) def test_equal(self): f = F('name') same_f = F('name') other_f = F('username') self.assertEqual(f, same_f) self.assertNotEqual(f, other_f) def test_hash(self): d = {F('name'): 'Bob'} self.assertIn(F('name'), d) self.assertEqual(d[F('name')], 'Bob') def test_not_equal_Value(self): f = F('name') value = Value('name') self.assertNotEqual(f, value) self.assertNotEqual(value, f) class ExpressionsTests(TestCase): def test_F_reuse(self): f = F('id') n = Number.objects.create(integer=-1) c = Company.objects.create( name="Example Inc.", num_employees=2300, num_chairs=5, ceo=Employee.objects.create(firstname="Joe", lastname="Smith") ) c_qs = Company.objects.filter(id=f) self.assertEqual(c_qs.get(), c) # Reuse the same F-object for another queryset n_qs = Number.objects.filter(id=f) self.assertEqual(n_qs.get(), n) # The original query still works correctly self.assertEqual(c_qs.get(), c) def test_patterns_escape(self): r""" Special characters (e.g. 
        %, _ and \) stored in the database are properly escaped when using a
        pattern lookup with an expression -- refs #16731
        """
        Employee.objects.bulk_create([
            Employee(firstname="%Joh\\nny", lastname="%Joh\\n"),
            Employee(firstname="Johnny", lastname="%John"),
            Employee(firstname="Jean-Claude", lastname="Claud_"),
            Employee(firstname="Jean-Claude", lastname="Claude"),
            Employee(firstname="Jean-Claude", lastname="Claude%"),
            Employee(firstname="Johnny", lastname="Joh\\n"),
            Employee(firstname="Johnny", lastname="John"),
            Employee(firstname="Johnny", lastname="_ohn"),
        ])
        self.assertQuerysetEqual(
            Employee.objects.filter(firstname__contains=F('lastname')),
            ["<Employee: %Joh\\nny %Joh\\n>", "<Employee: Jean-Claude Claude>", "<Employee: Johnny John>"],
            ordered=False,
        )
        self.assertQuerysetEqual(
            Employee.objects.filter(firstname__startswith=F('lastname')),
            ["<Employee: %Joh\\nny %Joh\\n>", "<Employee: Johnny John>"],
            ordered=False,
        )
        self.assertQuerysetEqual(
            Employee.objects.filter(firstname__endswith=F('lastname')),
            ["<Employee: Jean-Claude Claude>"],
            ordered=False,
        )

    def test_insensitive_patterns_escape(self):
        r"""
        Special characters (e.g. %, _ and \) stored in the database are
        properly escaped when using a case-insensitive pattern lookup with an
        expression -- refs #16731
        """
        Employee.objects.bulk_create([
            Employee(firstname="%Joh\\nny", lastname="%joh\\n"),
            Employee(firstname="Johnny", lastname="%john"),
            Employee(firstname="Jean-Claude", lastname="claud_"),
            Employee(firstname="Jean-Claude", lastname="claude"),
            Employee(firstname="Jean-Claude", lastname="claude%"),
            Employee(firstname="Johnny", lastname="joh\\n"),
            Employee(firstname="Johnny", lastname="john"),
            Employee(firstname="Johnny", lastname="_ohn"),
        ])
        self.assertQuerysetEqual(
            Employee.objects.filter(firstname__icontains=F('lastname')),
            ["<Employee: %Joh\\nny %joh\\n>", "<Employee: Jean-Claude claude>", "<Employee: Johnny john>"],
            ordered=False,
        )
        self.assertQuerysetEqual(
            Employee.objects.filter(firstname__istartswith=F('lastname')),
            ["<Employee: %Joh\\nny %joh\\n>", "<Employee: Johnny john>"],
            ordered=False,
        )
        self.assertQuerysetEqual(
            Employee.objects.filter(firstname__iendswith=F('lastname')),
            ["<Employee: Jean-Claude claude>"],
            ordered=False,
        )


@isolate_apps('expressions')
class SimpleExpressionTests(SimpleTestCase):

    def test_equal(self):
        self.assertEqual(Expression(), Expression())
        self.assertEqual(
            Expression(models.IntegerField()),
            Expression(output_field=models.IntegerField())
        )
        self.assertNotEqual(
            Expression(models.IntegerField()),
            Expression(models.CharField())
        )

        class TestModel(models.Model):
            field = models.IntegerField()
            other_field = models.IntegerField()

        self.assertNotEqual(
            Expression(TestModel._meta.get_field('field')),
            Expression(TestModel._meta.get_field('other_field')),
        )

    def test_hash(self):
        self.assertEqual(hash(Expression()), hash(Expression()))
        self.assertEqual(
            hash(Expression(models.IntegerField())),
            hash(Expression(output_field=models.IntegerField()))
        )
        self.assertNotEqual(
            hash(Expression(models.IntegerField())),
            hash(Expression(models.CharField())),
        )

        class TestModel(models.Model):
            field = models.IntegerField()
            other_field = models.IntegerField()

        self.assertNotEqual(
            hash(Expression(TestModel._meta.get_field('field'))),
            hash(Expression(TestModel._meta.get_field('other_field'))),
        )


class ExpressionsNumericTests(TestCase):

    @classmethod
    def setUpTestData(cls):
        Number(integer=-1).save()
        Number(integer=42).save()
        Number(integer=1337).save()
        Number.objects.update(float=F('integer'))

    def test_fill_with_value_from_same_object(self):
        """
        We can fill a value in all objects with another value from the same
        object.
        """
        self.assertQuerysetEqual(
            Number.objects.all(),
            ['<Number: -1, -1.000>', '<Number: 42, 42.000>', '<Number: 1337, 1337.000>'],
            ordered=False
        )

    def test_increment_value(self):
        """
        We can increment a value on all objects in a queryset.
        """
        self.assertEqual(Number.objects.filter(integer__gt=0).update(integer=F('integer') + 1), 2)
        self.assertQuerysetEqual(
            Number.objects.all(),
            ['<Number: -1, -1.000>', '<Number: 43, 42.000>', '<Number: 1338, 1337.000>'],
            ordered=False
        )

    def test_filter_not_equals_other_field(self):
        """
        We can filter for objects where a value is not equal to the value of
        another field.
        """
        self.assertEqual(Number.objects.filter(integer__gt=0).update(integer=F('integer') + 1), 2)
        self.assertQuerysetEqual(
            Number.objects.exclude(float=F('integer')),
            ['<Number: 43, 42.000>', '<Number: 1338, 1337.000>'],
            ordered=False
        )

    def test_complex_expressions(self):
        """
        Complex expressions combining different arithmetic operators are
        possible.
        """
        n = Number.objects.create(integer=10, float=123.45)
        self.assertEqual(Number.objects.filter(pk=n.pk).update(
            float=F('integer') + F('float') * 2), 1)
        self.assertEqual(Number.objects.get(pk=n.pk).integer, 10)
        self.assertEqual(Number.objects.get(pk=n.pk).float, Approximate(256.900, places=3))


class ExpressionOperatorTests(TestCase):

    @classmethod
    def setUpTestData(cls):
        cls.n = Number.objects.create(integer=42, float=15.5)
        cls.n1 = Number.objects.create(integer=-42, float=-15.5)

    def test_lefthand_addition(self):
        # LH addition of floats and integers
        Number.objects.filter(pk=self.n.pk).update(
            integer=F('integer') + 15,
            float=F('float') + 42.7
        )
        self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 57)
        self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(58.200, places=3))

    def test_lefthand_subtraction(self):
        # LH subtraction of floats and integers
        Number.objects.filter(pk=self.n.pk).update(integer=F('integer') - 15, float=F('float') - 42.7)
        self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 27)
        self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(-27.200, places=3))

    def test_lefthand_multiplication(self):
        # LH multiplication of floats and integers
        Number.objects.filter(pk=self.n.pk).update(integer=F('integer') * 15, float=F('float') * 42.7)
        self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 630)
        self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(661.850, places=3))

    def test_lefthand_division(self):
        # LH division of floats and integers
        Number.objects.filter(pk=self.n.pk).update(integer=F('integer') / 2, float=F('float') / 42.7)
        self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 21)
        self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(0.363, places=3))

    def test_lefthand_modulo(self):
        # LH modulo arithmetic on integers
        Number.objects.filter(pk=self.n.pk).update(integer=F('integer') % 20)
        self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 2)
        self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(15.500, places=3))

    def test_lefthand_bitwise_and(self):
        # LH bitwise AND on integers
        Number.objects.filter(pk=self.n.pk).update(integer=F('integer').bitand(56))
        Number.objects.filter(pk=self.n1.pk).update(integer=F('integer').bitand(-56))
        self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 40)
        self.assertEqual(Number.objects.get(pk=self.n1.pk).integer, -64)
        self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(15.500, places=3))

    def test_lefthand_bitwise_left_shift_operator(self):
        Number.objects.update(integer=F('integer').bitleftshift(2))
        self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 168)
        self.assertEqual(Number.objects.get(pk=self.n1.pk).integer, -168)

    def test_lefthand_bitwise_right_shift_operator(self):
        Number.objects.update(integer=F('integer').bitrightshift(2))
        self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 10)
        self.assertEqual(Number.objects.get(pk=self.n1.pk).integer, -11)

    def test_lefthand_bitwise_or(self):
        # LH bitwise OR on integers
        Number.objects.update(integer=F('integer').bitor(48))
        self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 58)
        self.assertEqual(Number.objects.get(pk=self.n1.pk).integer, -10)
        self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(15.500, places=3))

    def test_lefthand_power(self):
        # LH power arithmetic on floats and integers
        Number.objects.filter(pk=self.n.pk).update(integer=F('integer') ** 2, float=F('float') ** 1.5)
        self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 1764)
        self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(61.02, places=2))

    def test_right_hand_addition(self):
        # Right hand operators
        # RH addition of floats and integers
        Number.objects.filter(pk=self.n.pk).update(integer=15 + F('integer'), float=42.7 + F('float'))
        self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 57)
        self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(58.200, places=3))

    def test_right_hand_subtraction(self):
        # RH subtraction of floats and integers
        Number.objects.filter(pk=self.n.pk).update(integer=15 - F('integer'), float=42.7 - F('float'))
        self.assertEqual(Number.objects.get(pk=self.n.pk).integer, -27)
        self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(27.200, places=3))

    def test_right_hand_multiplication(self):
        # RH multiplication of floats and integers
        Number.objects.filter(pk=self.n.pk).update(integer=15 * F('integer'), float=42.7 * F('float'))
        self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 630)
        self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(661.850, places=3))

    def test_right_hand_division(self):
        # RH division of floats and integers
        Number.objects.filter(pk=self.n.pk).update(integer=640 / F('integer'), float=42.7 / F('float'))
        self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 15)
        self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(2.755, places=3))

    def test_right_hand_modulo(self):
        # RH modulo arithmetic on integers
        Number.objects.filter(pk=self.n.pk).update(integer=69 % F('integer'))
        self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 27)
        self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(15.500, places=3))

    def test_righthand_power(self):
        # RH power arithmetic on floats and integers
        Number.objects.filter(pk=self.n.pk).update(integer=2 ** F('integer'), float=1.5 ** F('float'))
        self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 4398046511104)
        self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(536.308, places=3))


class FTimeDeltaTests(TestCase):

    @classmethod
    def setUpTestData(cls):
        cls.sday = sday = datetime.date(2010, 6, 25)
        cls.stime = stime = datetime.datetime(2010, 6, 25, 12, 15, 30, 747000)
        midnight = datetime.time(0)

        delta0 = datetime.timedelta(0)
        delta1 = datetime.timedelta(microseconds=253000)
        delta2 = datetime.timedelta(seconds=44)
        delta3 = datetime.timedelta(hours=21, minutes=8)
        delta4 = datetime.timedelta(days=10)
        delta5 =
datetime.timedelta(days=90) # Test data is set so that deltas and delays will be # strictly increasing. cls.deltas = [] cls.delays = [] cls.days_long = [] # e0: started same day as assigned, zero duration end = stime + delta0 e0 = Experiment.objects.create( name='e0', assigned=sday, start=stime, end=end, completed=end.date(), estimated_time=delta0, ) cls.deltas.append(delta0) cls.delays.append(e0.start - datetime.datetime.combine(e0.assigned, midnight)) cls.days_long.append(e0.completed - e0.assigned) # e1: started one day after assigned, tiny duration, data # set so that end time has no fractional seconds, which # tests an edge case on sqlite. delay = datetime.timedelta(1) end = stime + delay + delta1 e1 = Experiment.objects.create( name='e1', assigned=sday, start=stime + delay, end=end, completed=end.date(), estimated_time=delta1, ) cls.deltas.append(delta1) cls.delays.append(e1.start - datetime.datetime.combine(e1.assigned, midnight)) cls.days_long.append(e1.completed - e1.assigned) # e2: started three days after assigned, small duration end = stime + delta2 e2 = Experiment.objects.create( name='e2', assigned=sday - datetime.timedelta(3), start=stime, end=end, completed=end.date(), estimated_time=datetime.timedelta(hours=1), ) cls.deltas.append(delta2) cls.delays.append(e2.start - datetime.datetime.combine(e2.assigned, midnight)) cls.days_long.append(e2.completed - e2.assigned) # e3: started four days after assigned, medium duration delay = datetime.timedelta(4) end = stime + delay + delta3 e3 = Experiment.objects.create( name='e3', assigned=sday, start=stime + delay, end=end, completed=end.date(), estimated_time=delta3, ) cls.deltas.append(delta3) cls.delays.append(e3.start - datetime.datetime.combine(e3.assigned, midnight)) cls.days_long.append(e3.completed - e3.assigned) # e4: started 10 days after assignment, long duration end = stime + delta4 e4 = Experiment.objects.create( name='e4', assigned=sday - datetime.timedelta(10), start=stime, end=end, completed=end.date(), estimated_time=delta4 - datetime.timedelta(1), ) cls.deltas.append(delta4) cls.delays.append(e4.start - datetime.datetime.combine(e4.assigned, midnight)) cls.days_long.append(e4.completed - e4.assigned) # e5: started a month after assignment, very long duration delay = datetime.timedelta(30) end = stime + delay + delta5 e5 = Experiment.objects.create( name='e5', assigned=sday, start=stime + delay, end=end, completed=end.date(), estimated_time=delta5, ) cls.deltas.append(delta5) cls.delays.append(e5.start - datetime.datetime.combine(e5.assigned, midnight)) cls.days_long.append(e5.completed - e5.assigned) cls.expnames = [e.name for e in Experiment.objects.all()] def test_multiple_query_compilation(self): # Ticket #21643 queryset = Experiment.objects.filter(end__lt=F('start') + datetime.timedelta(hours=1)) q1 = str(queryset.query) q2 = str(queryset.query) self.assertEqual(q1, q2) def test_query_clone(self): # Ticket #21643 - Crash when compiling query more than once qs = Experiment.objects.filter(end__lt=F('start') + datetime.timedelta(hours=1)) qs2 = qs.all() list(qs) list(qs2) # Intentionally no assert def test_delta_add(self): for i, delta in enumerate(self.deltas): test_set = [e.name for e in Experiment.objects.filter(end__lt=F('start') + delta)] self.assertEqual(test_set, self.expnames[:i]) test_set = [e.name for e in Experiment.objects.filter(end__lt=delta + F('start'))] self.assertEqual(test_set, self.expnames[:i]) test_set = [e.name for e in Experiment.objects.filter(end__lte=F('start') + delta)] 
self.assertEqual(test_set, self.expnames[:i + 1]) def test_delta_subtract(self): for i, delta in enumerate(self.deltas): test_set = [e.name for e in Experiment.objects.filter(start__gt=F('end') - delta)] self.assertEqual(test_set, self.expnames[:i]) test_set = [e.name for e in Experiment.objects.filter(start__gte=F('end') - delta)] self.assertEqual(test_set, self.expnames[:i + 1]) def test_exclude(self): for i, delta in enumerate(self.deltas): test_set = [e.name for e in Experiment.objects.exclude(end__lt=F('start') + delta)] self.assertEqual(test_set, self.expnames[i:]) test_set = [e.name for e in Experiment.objects.exclude(end__lte=F('start') + delta)] self.assertEqual(test_set, self.expnames[i + 1:]) def test_date_comparison(self): for i, days in enumerate(self.days_long): test_set = [e.name for e in Experiment.objects.filter(completed__lt=F('assigned') + days)] self.assertEqual(test_set, self.expnames[:i]) test_set = [e.name for e in Experiment.objects.filter(completed__lte=F('assigned') + days)] self.assertEqual(test_set, self.expnames[:i + 1]) @skipUnlessDBFeature("supports_mixed_date_datetime_comparisons") def test_mixed_comparisons1(self): for i, delay in enumerate(self.delays): test_set = [e.name for e in Experiment.objects.filter(assigned__gt=F('start') - delay)] self.assertEqual(test_set, self.expnames[:i]) test_set = [e.name for e in Experiment.objects.filter(assigned__gte=F('start') - delay)] self.assertEqual(test_set, self.expnames[:i + 1]) def test_mixed_comparisons2(self): for i, delay in enumerate(self.delays): delay = datetime.timedelta(delay.days) test_set = [e.name for e in Experiment.objects.filter(start__lt=F('assigned') + delay)] self.assertEqual(test_set, self.expnames[:i]) test_set = [ e.name for e in Experiment.objects.filter(start__lte=F('assigned') + delay + datetime.timedelta(1)) ] self.assertEqual(test_set, self.expnames[:i + 1]) def test_delta_update(self): for delta in self.deltas: exps = Experiment.objects.all() expected_durations = [e.duration() for e in exps] expected_starts = [e.start + delta for e in exps] expected_ends = [e.end + delta for e in exps] Experiment.objects.update(start=F('start') + delta, end=F('end') + delta) exps = Experiment.objects.all() new_starts = [e.start for e in exps] new_ends = [e.end for e in exps] new_durations = [e.duration() for e in exps] self.assertEqual(expected_starts, new_starts) self.assertEqual(expected_ends, new_ends) self.assertEqual(expected_durations, new_durations) def test_invalid_operator(self): with self.assertRaises(DatabaseError): list(Experiment.objects.filter(start=F('start') * datetime.timedelta(0))) def test_durationfield_add(self): zeros = [e.name for e in Experiment.objects.filter(start=F('start') + F('estimated_time'))] self.assertEqual(zeros, ['e0']) end_less = [e.name for e in Experiment.objects.filter(end__lt=F('start') + F('estimated_time'))] self.assertEqual(end_less, ['e2']) delta_math = [ e.name for e in Experiment.objects.filter(end__gte=F('start') + F('estimated_time') + datetime.timedelta(hours=1)) ] self.assertEqual(delta_math, ['e4']) queryset = Experiment.objects.annotate(shifted=ExpressionWrapper( F('start') + Value(None, output_field=models.DurationField()), output_field=models.DateTimeField(), )) self.assertIsNone(queryset.first().shifted) @skipUnlessDBFeature('supports_temporal_subtraction') def test_date_subtraction(self): queryset = Experiment.objects.annotate( completion_duration=ExpressionWrapper( F('completed') - F('assigned'), output_field=models.DurationField() ) ) 
at_least_5_days = {e.name for e in queryset.filter(completion_duration__gte=datetime.timedelta(days=5))} self.assertEqual(at_least_5_days, {'e3', 'e4', 'e5'}) at_least_120_days = {e.name for e in queryset.filter(completion_duration__gte=datetime.timedelta(days=120))} self.assertEqual(at_least_120_days, {'e5'}) less_than_5_days = {e.name for e in queryset.filter(completion_duration__lt=datetime.timedelta(days=5))} self.assertEqual(less_than_5_days, {'e0', 'e1', 'e2'}) queryset = Experiment.objects.annotate(difference=ExpressionWrapper( F('completed') - Value(None, output_field=models.DateField()), output_field=models.DurationField(), )) self.assertIsNone(queryset.first().difference) queryset = Experiment.objects.annotate(shifted=ExpressionWrapper( F('completed') - Value(None, output_field=models.DurationField()), output_field=models.DateField(), )) self.assertIsNone(queryset.first().shifted) @skipUnlessDBFeature('supports_temporal_subtraction') def test_time_subtraction(self): Time.objects.create(time=datetime.time(12, 30, 15, 2345)) queryset = Time.objects.annotate( difference=ExpressionWrapper( F('time') - Value(datetime.time(11, 15, 0), output_field=models.TimeField()), output_field=models.DurationField(), ) ) self.assertEqual( queryset.get().difference, datetime.timedelta(hours=1, minutes=15, seconds=15, microseconds=2345) ) queryset = Time.objects.annotate(difference=ExpressionWrapper( F('time') - Value(None, output_field=models.TimeField()), output_field=models.DurationField(), )) self.assertIsNone(queryset.first().difference) queryset = Time.objects.annotate(shifted=ExpressionWrapper( F('time') - Value(None, output_field=models.DurationField()), output_field=models.TimeField(), )) self.assertIsNone(queryset.first().shifted) @skipUnlessDBFeature('supports_temporal_subtraction') def test_datetime_subtraction(self): under_estimate = [ e.name for e in Experiment.objects.filter(estimated_time__gt=F('end') - F('start')) ] self.assertEqual(under_estimate, ['e2']) over_estimate = [ e.name for e in Experiment.objects.filter(estimated_time__lt=F('end') - F('start')) ] self.assertEqual(over_estimate, ['e4']) queryset = Experiment.objects.annotate(difference=ExpressionWrapper( F('start') - Value(None, output_field=models.DateTimeField()), output_field=models.DurationField(), )) self.assertIsNone(queryset.first().difference) queryset = Experiment.objects.annotate(shifted=ExpressionWrapper( F('start') - Value(None, output_field=models.DurationField()), output_field=models.DateTimeField(), )) self.assertIsNone(queryset.first().shifted) @skipUnlessDBFeature('supports_temporal_subtraction') def test_datetime_subtraction_microseconds(self): delta = datetime.timedelta(microseconds=8999999999999999) Experiment.objects.update(end=F('start') + delta) qs = Experiment.objects.annotate( delta=ExpressionWrapper(F('end') - F('start'), output_field=models.DurationField()) ) for e in qs: self.assertEqual(e.delta, delta) def test_duration_with_datetime(self): # Exclude e1 which has very high precision so we can test this on all # backends regardless of whether or not it supports # microsecond_precision. 
over_estimate = Experiment.objects.exclude(name='e1').filter( completed__gt=self.stime + F('estimated_time'), ).order_by('name') self.assertQuerysetEqual(over_estimate, ['e3', 'e4', 'e5'], lambda e: e.name) def test_duration_with_datetime_microseconds(self): delta = datetime.timedelta(microseconds=8999999999999999) qs = Experiment.objects.annotate(dt=ExpressionWrapper( F('start') + delta, output_field=models.DateTimeField(), )) for e in qs: self.assertEqual(e.dt, e.start + delta) def test_date_minus_duration(self): more_than_4_days = Experiment.objects.filter( assigned__lt=F('completed') - Value(datetime.timedelta(days=4), output_field=models.DurationField()) ) self.assertQuerysetEqual(more_than_4_days, ['e3', 'e4', 'e5'], lambda e: e.name) def test_negative_timedelta_update(self): # subtract 30 seconds, 30 minutes, 2 hours and 2 days experiments = Experiment.objects.filter(name='e0').annotate( start_sub_seconds=F('start') + datetime.timedelta(seconds=-30), ).annotate( start_sub_minutes=F('start_sub_seconds') + datetime.timedelta(minutes=-30), ).annotate( start_sub_hours=F('start_sub_minutes') + datetime.timedelta(hours=-2), ).annotate( new_start=F('start_sub_hours') + datetime.timedelta(days=-2), ) expected_start = datetime.datetime(2010, 6, 23, 9, 45, 0) # subtract 30 microseconds experiments = experiments.annotate(new_start=F('new_start') + datetime.timedelta(microseconds=-30)) expected_start += datetime.timedelta(microseconds=+746970) experiments.update(start=F('new_start')) e0 = Experiment.objects.get(name='e0') self.assertEqual(e0.start, expected_start) class ValueTests(TestCase): def test_update_TimeField_using_Value(self): Time.objects.create() Time.objects.update(time=Value(datetime.time(1), output_field=TimeField())) self.assertEqual(Time.objects.get().time, datetime.time(1)) def test_update_UUIDField_using_Value(self): UUID.objects.create() UUID.objects.update(uuid=Value(uuid.UUID('12345678901234567890123456789012'), output_field=UUIDField())) self.assertEqual(UUID.objects.get().uuid, uuid.UUID('12345678901234567890123456789012')) def test_deconstruct(self): value = Value('name') path, args, kwargs = value.deconstruct() self.assertEqual(path, 'django.db.models.expressions.Value') self.assertEqual(args, (value.value,)) self.assertEqual(kwargs, {}) def test_deconstruct_output_field(self): value = Value('name', output_field=CharField()) path, args, kwargs = value.deconstruct() self.assertEqual(path, 'django.db.models.expressions.Value') self.assertEqual(args, (value.value,)) self.assertEqual(len(kwargs), 1) self.assertEqual(kwargs['output_field'].deconstruct(), CharField().deconstruct()) def test_equal(self): value = Value('name') self.assertEqual(value, Value('name')) self.assertNotEqual(value, Value('username')) def test_hash(self): d = {Value('name'): 'Bob'} self.assertIn(Value('name'), d) self.assertEqual(d[Value('name')], 'Bob') def test_equal_output_field(self): value = Value('name', output_field=CharField()) same_value = Value('name', output_field=CharField()) other_value = Value('name', output_field=TimeField()) no_output_field = Value('name') self.assertEqual(value, same_value) self.assertNotEqual(value, other_value) self.assertNotEqual(value, no_output_field) def test_raise_empty_expressionlist(self): msg = 'ExpressionList requires at least one expression' with self.assertRaisesMessage(ValueError, msg): ExpressionList() class FieldTransformTests(TestCase): @classmethod def setUpTestData(cls): cls.sday = sday = datetime.date(2010, 6, 25) cls.stime = stime = 
datetime.datetime(2010, 6, 25, 12, 15, 30, 747000) cls.ex1 = Experiment.objects.create( name='Experiment 1', assigned=sday, completed=sday + datetime.timedelta(2), estimated_time=datetime.timedelta(2), start=stime, end=stime + datetime.timedelta(2), ) def test_month_aggregation(self): self.assertEqual( Experiment.objects.aggregate(month_count=Count('assigned__month')), {'month_count': 1} ) def test_transform_in_values(self): self.assertQuerysetEqual( Experiment.objects.values('assigned__month'), ["{'assigned__month': 6}"] ) def test_multiple_transforms_in_values(self): self.assertQuerysetEqual( Experiment.objects.values('end__date__month'), ["{'end__date__month': 6}"] ) class ReprTests(SimpleTestCase): def test_expressions(self): self.assertEqual( repr(Case(When(a=1))), "<Case: CASE WHEN <Q: (AND: ('a', 1))> THEN Value(None), ELSE Value(None)>" ) self.assertEqual( repr(When(Q(age__gte=18), then=Value('legal'))), "<When: WHEN <Q: (AND: ('age__gte', 18))> THEN Value(legal)>" ) self.assertEqual(repr(Col('alias', 'field')), "Col(alias, field)") self.assertEqual(repr(F('published')), "F(published)") self.assertEqual(repr(F('cost') + F('tax')), "<CombinedExpression: F(cost) + F(tax)>") self.assertEqual( repr(ExpressionWrapper(F('cost') + F('tax'), models.IntegerField())), "ExpressionWrapper(F(cost) + F(tax))" ) self.assertEqual(repr(Func('published', function='TO_CHAR')), "Func(F(published), function=TO_CHAR)") self.assertEqual(repr(OrderBy(Value(1))), 'OrderBy(Value(1), descending=False)') self.assertEqual(repr(Random()), "Random()") self.assertEqual(repr(RawSQL('table.col', [])), "RawSQL(table.col, [])") self.assertEqual(repr(Ref('sum_cost', Sum('cost'))), "Ref(sum_cost, Sum(F(cost)))") self.assertEqual(repr(Value(1)), "Value(1)") self.assertEqual( repr(ExpressionList(F('col'), F('anothercol'))), 'ExpressionList(F(col), F(anothercol))' ) self.assertEqual( repr(ExpressionList(OrderBy(F('col'), descending=False))), 'ExpressionList(OrderBy(F(col), descending=False))' ) def test_functions(self): self.assertEqual(repr(Coalesce('a', 'b')), "Coalesce(F(a), F(b))") self.assertEqual(repr(Concat('a', 'b')), "Concat(ConcatPair(F(a), F(b)))") self.assertEqual(repr(Length('a')), "Length(F(a))") self.assertEqual(repr(Lower('a')), "Lower(F(a))") self.assertEqual(repr(Substr('a', 1, 3)), "Substr(F(a), Value(1), Value(3))") self.assertEqual(repr(Upper('a')), "Upper(F(a))") def test_aggregates(self): self.assertEqual(repr(Avg('a')), "Avg(F(a))") self.assertEqual(repr(Count('a')), "Count(F(a))") self.assertEqual(repr(Count('*')), "Count('*')") self.assertEqual(repr(Max('a')), "Max(F(a))") self.assertEqual(repr(Min('a')), "Min(F(a))") self.assertEqual(repr(StdDev('a')), "StdDev(F(a), sample=False)") self.assertEqual(repr(Sum('a')), "Sum(F(a))") self.assertEqual(repr(Variance('a', sample=True)), "Variance(F(a), sample=True)") def test_distinct_aggregates(self): self.assertEqual(repr(Count('a', distinct=True)), "Count(F(a), distinct=True)") self.assertEqual(repr(Count('*', distinct=True)), "Count('*', distinct=True)") def test_filtered_aggregates(self): filter = Q(a=1) self.assertEqual(repr(Avg('a', filter=filter)), "Avg(F(a), filter=(AND: ('a', 1)))") self.assertEqual(repr(Count('a', filter=filter)), "Count(F(a), filter=(AND: ('a', 1)))") self.assertEqual(repr(Max('a', filter=filter)), "Max(F(a), filter=(AND: ('a', 1)))") self.assertEqual(repr(Min('a', filter=filter)), "Min(F(a), filter=(AND: ('a', 1)))") self.assertEqual(repr(StdDev('a', filter=filter)), "StdDev(F(a), filter=(AND: ('a', 1)), sample=False)") 
self.assertEqual(repr(Sum('a', filter=filter)), "Sum(F(a), filter=(AND: ('a', 1)))") self.assertEqual( repr(Variance('a', sample=True, filter=filter)), "Variance(F(a), filter=(AND: ('a', 1)), sample=True)" ) self.assertEqual( repr(Count('a', filter=filter, distinct=True)), "Count(F(a), distinct=True, filter=(AND: ('a', 1)))" ) class CombinableTests(SimpleTestCase): bitwise_msg = 'Use .bitand() and .bitor() for bitwise logical operations.' def test_negation(self): c = Combinable() self.assertEqual(-c, c * -1) def test_and(self): with self.assertRaisesMessage(NotImplementedError, self.bitwise_msg): Combinable() & Combinable() def test_or(self): with self.assertRaisesMessage(NotImplementedError, self.bitwise_msg): Combinable() | Combinable() def test_reversed_and(self): with self.assertRaisesMessage(NotImplementedError, self.bitwise_msg): object() & Combinable() def test_reversed_or(self): with self.assertRaisesMessage(NotImplementedError, self.bitwise_msg): object() | Combinable()
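# ---------------------------------------------------------------------------
# Editor's note: the helper below is an illustrative sketch, not part of the
# original test suite. It condenses the Exists()/Subquery() annotation
# pattern exercised at length above, assuming the Company/Employee models of
# this test app; the helper name is invented for illustration.
# ---------------------------------------------------------------------------
def _example_exists_subquery_annotation():
    """A minimal sketch of annotating rows via correlated subqueries."""
    from django.db.models import CharField, Exists, OuterRef, Subquery

    return Employee.objects.annotate(
        # Exists() compiles to an EXISTS (...) clause and yields a boolean.
        is_ceo=Exists(Company.objects.filter(ceo=OuterRef('pk'))),
        # Subquery() must select a single row and column, hence
        # .values('name')[:1]; output_field declares the result type.
        ceo_company_name=Subquery(
            Company.objects.filter(ceo=OuterRef('pk')).values('name')[:1],
            output_field=CharField(),
        ),
    )
# Usage: _example_exists_subquery_annotation().filter(is_ceo=True) mirrors
# the annotations asserted on in test_subquery() above.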
from unittest import mock, skipUnless from django.db import connection from django.db.models import Index from django.db.utils import DatabaseError from django.test import TransactionTestCase, skipUnlessDBFeature from .models import ( Article, ArticleReporter, CheckConstraintModel, City, Comment, Country, District, Reporter, ) class IntrospectionTests(TransactionTestCase): available_apps = ['introspection'] def test_table_names(self): tl = connection.introspection.table_names() self.assertEqual(tl, sorted(tl)) self.assertIn(Reporter._meta.db_table, tl, "'%s' isn't in table_list()." % Reporter._meta.db_table) self.assertIn(Article._meta.db_table, tl, "'%s' isn't in table_list()." % Article._meta.db_table) def test_django_table_names(self): with connection.cursor() as cursor: cursor.execute('CREATE TABLE django_ixn_test_table (id INTEGER);') tl = connection.introspection.django_table_names() cursor.execute("DROP TABLE django_ixn_test_table;") self.assertNotIn('django_ixn_test_table', tl, "django_table_names() returned a non-Django table") def test_django_table_names_retval_type(self): # Table name is a list #15216 tl = connection.introspection.django_table_names(only_existing=True) self.assertIs(type(tl), list) tl = connection.introspection.django_table_names(only_existing=False) self.assertIs(type(tl), list) def test_table_names_with_views(self): with connection.cursor() as cursor: try: cursor.execute( 'CREATE VIEW introspection_article_view AS SELECT headline ' 'from introspection_article;') except DatabaseError as e: if 'insufficient privileges' in str(e): self.fail("The test user has no CREATE VIEW privileges") else: raise try: self.assertIn('introspection_article_view', connection.introspection.table_names(include_views=True)) self.assertNotIn('introspection_article_view', connection.introspection.table_names()) finally: with connection.cursor() as cursor: cursor.execute('DROP VIEW introspection_article_view') def test_unmanaged_through_model(self): tables = connection.introspection.django_table_names() self.assertNotIn(ArticleReporter._meta.db_table, tables) def test_installed_models(self): tables = [Article._meta.db_table, Reporter._meta.db_table] models = connection.introspection.installed_models(tables) self.assertEqual(models, {Article, Reporter}) def test_sequence_list(self): sequences = connection.introspection.sequence_list() reporter_seqs = [seq for seq in sequences if seq['table'] == Reporter._meta.db_table] self.assertEqual(len(reporter_seqs), 1, 'Reporter sequence not found in sequence_list()') self.assertEqual(reporter_seqs[0]['column'], 'id') def test_get_table_description_names(self): with connection.cursor() as cursor: desc = connection.introspection.get_table_description(cursor, Reporter._meta.db_table) self.assertEqual([r[0] for r in desc], [f.column for f in Reporter._meta.fields]) def test_get_table_description_types(self): with connection.cursor() as cursor: desc = connection.introspection.get_table_description(cursor, Reporter._meta.db_table) self.assertEqual( [connection.introspection.get_field_type(r[1], r) for r in desc], [ 'AutoField' if connection.features.can_introspect_autofield else 'IntegerField', 'CharField', 'CharField', 'CharField', 'BigIntegerField' if connection.features.can_introspect_big_integer_field else 'IntegerField', 'BinaryField' if connection.features.can_introspect_binary_field else 'TextField', 'SmallIntegerField' if connection.features.can_introspect_small_integer_field else 'IntegerField', 'DurationField' if 
connection.features.can_introspect_duration_field else 'BigIntegerField', ] ) def test_get_table_description_col_lengths(self): with connection.cursor() as cursor: desc = connection.introspection.get_table_description(cursor, Reporter._meta.db_table) self.assertEqual( [r[3] for r in desc if connection.introspection.get_field_type(r[1], r) == 'CharField'], [30, 30, 254] ) def test_get_table_description_nullable(self): with connection.cursor() as cursor: desc = connection.introspection.get_table_description(cursor, Reporter._meta.db_table) nullable_by_backend = connection.features.interprets_empty_strings_as_nulls self.assertEqual( [r[6] for r in desc], [False, nullable_by_backend, nullable_by_backend, nullable_by_backend, True, True, False, False] ) @skipUnlessDBFeature('can_introspect_autofield') def test_bigautofield(self): with connection.cursor() as cursor: desc = connection.introspection.get_table_description(cursor, City._meta.db_table) self.assertIn( connection.features.introspected_big_auto_field_type, [connection.introspection.get_field_type(r[1], r) for r in desc], ) @skipUnlessDBFeature('can_introspect_autofield') def test_smallautofield(self): with connection.cursor() as cursor: desc = connection.introspection.get_table_description(cursor, Country._meta.db_table) self.assertIn( connection.features.introspected_small_auto_field_type, [connection.introspection.get_field_type(r[1], r) for r in desc], ) # Regression test for #9991 - 'real' types in postgres @skipUnlessDBFeature('has_real_datatype') def test_postgresql_real_type(self): with connection.cursor() as cursor: cursor.execute("CREATE TABLE django_ixn_real_test_table (number REAL);") desc = connection.introspection.get_table_description(cursor, 'django_ixn_real_test_table') cursor.execute('DROP TABLE django_ixn_real_test_table;') self.assertEqual(connection.introspection.get_field_type(desc[0][1], desc[0]), 'FloatField') @skipUnlessDBFeature('can_introspect_foreign_keys') def test_get_relations(self): with connection.cursor() as cursor: relations = connection.introspection.get_relations(cursor, Article._meta.db_table) # That's {field_name: (field_name_other_table, other_table)} expected_relations = { 'reporter_id': ('id', Reporter._meta.db_table), 'response_to_id': ('id', Article._meta.db_table), } self.assertEqual(relations, expected_relations) # Removing a field shouldn't disturb get_relations (#17785) body = Article._meta.get_field('body') with connection.schema_editor() as editor: editor.remove_field(Article, body) with connection.cursor() as cursor: relations = connection.introspection.get_relations(cursor, Article._meta.db_table) with connection.schema_editor() as editor: editor.add_field(Article, body) self.assertEqual(relations, expected_relations) @skipUnless(connection.vendor == 'sqlite', "This is an sqlite-specific issue") def test_get_relations_alt_format(self): """ With SQLite, foreign keys can be added with different syntaxes and formatting. 
""" create_table_statements = [ "CREATE TABLE track(id, art_id INTEGER, FOREIGN KEY(art_id) REFERENCES {}(id));", "CREATE TABLE track(id, art_id INTEGER, FOREIGN KEY (art_id) REFERENCES {}(id));" ] for statement in create_table_statements: with connection.cursor() as cursor: cursor.fetchone = mock.Mock(return_value=[statement.format(Article._meta.db_table), 'table']) relations = connection.introspection.get_relations(cursor, 'mocked_table') self.assertEqual(relations, {'art_id': ('id', Article._meta.db_table)}) @skipUnlessDBFeature('can_introspect_foreign_keys') def test_get_key_columns(self): with connection.cursor() as cursor: key_columns = connection.introspection.get_key_columns(cursor, Article._meta.db_table) self.assertEqual(set(key_columns), { ('reporter_id', Reporter._meta.db_table, 'id'), ('response_to_id', Article._meta.db_table, 'id'), }) def test_get_primary_key_column(self): with connection.cursor() as cursor: primary_key_column = connection.introspection.get_primary_key_column(cursor, Article._meta.db_table) pk_fk_column = connection.introspection.get_primary_key_column(cursor, District._meta.db_table) self.assertEqual(primary_key_column, 'id') self.assertEqual(pk_fk_column, 'city_id') def test_get_constraints_index_types(self): with connection.cursor() as cursor: constraints = connection.introspection.get_constraints(cursor, Article._meta.db_table) index = {} index2 = {} for val in constraints.values(): if val['columns'] == ['headline', 'pub_date']: index = val if val['columns'] == ['headline', 'response_to_id', 'pub_date', 'reporter_id']: index2 = val self.assertEqual(index['type'], Index.suffix) self.assertEqual(index2['type'], Index.suffix) @skipUnlessDBFeature('supports_index_column_ordering') def test_get_constraints_indexes_orders(self): """ Indexes have the 'orders' key with a list of 'ASC'/'DESC' values. 
""" with connection.cursor() as cursor: constraints = connection.introspection.get_constraints(cursor, Article._meta.db_table) indexes_verified = 0 expected_columns = [ ['reporter_id'], ['headline', 'pub_date'], ['response_to_id'], ['headline', 'response_to_id', 'pub_date', 'reporter_id'], ] for val in constraints.values(): if val['index'] and not (val['primary_key'] or val['unique']): self.assertIn(val['columns'], expected_columns) self.assertEqual(val['orders'], ['ASC'] * len(val['columns'])) indexes_verified += 1 self.assertEqual(indexes_verified, 4) def test_get_constraints(self): def assertDetails(details, cols, primary_key=False, unique=False, index=False, check=False, foreign_key=None): # Different backends have different values for same constraints: # PRIMARY KEY UNIQUE CONSTRAINT UNIQUE INDEX # MySQL pk=1 uniq=1 idx=1 pk=0 uniq=1 idx=1 pk=0 uniq=1 idx=1 # PostgreSQL pk=1 uniq=1 idx=0 pk=0 uniq=1 idx=0 pk=0 uniq=1 idx=1 # SQLite pk=1 uniq=0 idx=0 pk=0 uniq=1 idx=0 pk=0 uniq=1 idx=1 if details['primary_key']: details['unique'] = True if details['unique']: details['index'] = False self.assertEqual(details['columns'], cols) self.assertEqual(details['primary_key'], primary_key) self.assertEqual(details['unique'], unique) self.assertEqual(details['index'], index) self.assertEqual(details['check'], check) self.assertEqual(details['foreign_key'], foreign_key) # Test custom constraints custom_constraints = { 'article_email_pub_date_uniq', 'email_pub_date_idx', } with connection.cursor() as cursor: constraints = connection.introspection.get_constraints(cursor, Comment._meta.db_table) if ( connection.features.supports_column_check_constraints and connection.features.can_introspect_check_constraints ): constraints.update( connection.introspection.get_constraints(cursor, CheckConstraintModel._meta.db_table) ) custom_constraints.add('up_votes_gte_0_check') assertDetails(constraints['up_votes_gte_0_check'], ['up_votes'], check=True) assertDetails(constraints['article_email_pub_date_uniq'], ['article_id', 'email', 'pub_date'], unique=True) assertDetails(constraints['email_pub_date_idx'], ['email', 'pub_date'], index=True) # Test field constraints field_constraints = set() for name, details in constraints.items(): if name in custom_constraints: continue elif details['columns'] == ['up_votes'] and details['check']: assertDetails(details, ['up_votes'], check=True) field_constraints.add(name) elif details['columns'] == ['ref'] and details['unique']: assertDetails(details, ['ref'], unique=True) field_constraints.add(name) elif details['columns'] == ['article_id'] and details['index']: assertDetails(details, ['article_id'], index=True) field_constraints.add(name) elif details['columns'] == ['id'] and details['primary_key']: assertDetails(details, ['id'], primary_key=True, unique=True) field_constraints.add(name) elif details['columns'] == ['article_id'] and details['foreign_key']: assertDetails(details, ['article_id'], foreign_key=('introspection_article', 'id')) field_constraints.add(name) elif details['check']: # Some databases (e.g. Oracle) include additional check # constraints. field_constraints.add(name) # All constraints are accounted for. self.assertEqual(constraints.keys() ^ (custom_constraints | field_constraints), set())
from django.db import models class City(models.Model): id = models.BigAutoField(primary_key=True) name = models.CharField(max_length=50) def __str__(self): return self.name class Country(models.Model): id = models.SmallAutoField(primary_key=True) name = models.CharField(max_length=50) class District(models.Model): city = models.ForeignKey(City, models.CASCADE, primary_key=True) name = models.CharField(max_length=50) def __str__(self): return self.name class Reporter(models.Model): first_name = models.CharField(max_length=30) last_name = models.CharField(max_length=30) email = models.EmailField() facebook_user_id = models.BigIntegerField(null=True) raw_data = models.BinaryField(null=True) small_int = models.SmallIntegerField() interval = models.DurationField() class Meta: unique_together = ('first_name', 'last_name') def __str__(self): return "%s %s" % (self.first_name, self.last_name) class Article(models.Model): headline = models.CharField(max_length=100) pub_date = models.DateField() body = models.TextField(default='') reporter = models.ForeignKey(Reporter, models.CASCADE) response_to = models.ForeignKey('self', models.SET_NULL, null=True) unmanaged_reporters = models.ManyToManyField(Reporter, through='ArticleReporter', related_name='+') class Meta: ordering = ('headline',) index_together = [ ["headline", "pub_date"], ['headline', 'response_to', 'pub_date', 'reporter'], ] def __str__(self): return self.headline class ArticleReporter(models.Model): article = models.ForeignKey(Article, models.CASCADE) reporter = models.ForeignKey(Reporter, models.CASCADE) class Meta: managed = False class Comment(models.Model): ref = models.UUIDField(unique=True) article = models.ForeignKey(Article, models.CASCADE, db_index=True) email = models.EmailField() pub_date = models.DateTimeField() body = models.TextField() class Meta: constraints = [ models.UniqueConstraint(fields=['article', 'email', 'pub_date'], name='article_email_pub_date_uniq'), ] indexes = [ models.Index(fields=['email', 'pub_date'], name='email_pub_date_idx'), ] class CheckConstraintModel(models.Model): up_votes = models.PositiveIntegerField() class Meta: required_db_features = { 'supports_table_check_constraints', } constraints = [ models.CheckConstraint(name='up_votes_gte_0_check', check=models.Q(up_votes__gte=0)), ]
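# ---------------------------------------------------------------------------
# Editor's note (hedged, not part of the original file): the
# Article.Meta.index_together declaration above is what produces the two
# multi-column, non-unique indexes that the introspection tests assert on.
# A roughly equivalent declaration using Meta.indexes would be:
#
#     class Meta:
#         indexes = [
#             models.Index(fields=['headline', 'pub_date']),
#             models.Index(fields=['headline', 'response_to', 'pub_date', 'reporter']),
#         ]
# ---------------------------------------------------------------------------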
import collections.abc from datetime import datetime from math import ceil from operator import attrgetter from django.core.exceptions import FieldError from django.db import connection from django.db.models.expressions import Exists, OuterRef from django.db.models.functions import Substr from django.test import TestCase, skipUnlessDBFeature from .models import ( Article, Author, Game, IsNullWithNoneAsRHS, Player, Season, Tag, ) class LookupTests(TestCase): @classmethod def setUpTestData(cls): # Create a few Authors. cls.au1 = Author.objects.create(name='Author 1', alias='a1') cls.au2 = Author.objects.create(name='Author 2', alias='a2') # Create a few Articles. cls.a1 = Article.objects.create( headline='Article 1', pub_date=datetime(2005, 7, 26), author=cls.au1, slug='a1', ) cls.a2 = Article.objects.create( headline='Article 2', pub_date=datetime(2005, 7, 27), author=cls.au1, slug='a2', ) cls.a3 = Article.objects.create( headline='Article 3', pub_date=datetime(2005, 7, 27), author=cls.au1, slug='a3', ) cls.a4 = Article.objects.create( headline='Article 4', pub_date=datetime(2005, 7, 28), author=cls.au1, slug='a4', ) cls.a5 = Article.objects.create( headline='Article 5', pub_date=datetime(2005, 8, 1, 9, 0), author=cls.au2, slug='a5', ) cls.a6 = Article.objects.create( headline='Article 6', pub_date=datetime(2005, 8, 1, 8, 0), author=cls.au2, slug='a6', ) cls.a7 = Article.objects.create( headline='Article 7', pub_date=datetime(2005, 7, 27), author=cls.au2, slug='a7', ) # Create a few Tags. cls.t1 = Tag.objects.create(name='Tag 1') cls.t1.articles.add(cls.a1, cls.a2, cls.a3) cls.t2 = Tag.objects.create(name='Tag 2') cls.t2.articles.add(cls.a3, cls.a4, cls.a5) cls.t3 = Tag.objects.create(name='Tag 3') cls.t3.articles.add(cls.a5, cls.a6, cls.a7) def test_exists(self): # We can use .exists() to check that there are some self.assertTrue(Article.objects.exists()) for a in Article.objects.all(): a.delete() # There should be none now! self.assertFalse(Article.objects.exists()) def test_lookup_int_as_str(self): # Integer value can be queried using string self.assertQuerysetEqual(Article.objects.filter(id__iexact=str(self.a1.id)), ['<Article: Article 1>']) @skipUnlessDBFeature('supports_date_lookup_using_string') def test_lookup_date_as_str(self): # A date lookup can be performed using a string search self.assertQuerysetEqual( Article.objects.filter(pub_date__startswith='2005'), [ '<Article: Article 5>', '<Article: Article 6>', '<Article: Article 4>', '<Article: Article 2>', '<Article: Article 3>', '<Article: Article 7>', '<Article: Article 1>', ] ) def test_iterator(self): # Each QuerySet gets iterator(), which is a generator that "lazily" # returns results using database-level iteration. self.assertIsInstance(Article.objects.iterator(), collections.abc.Iterator) self.assertQuerysetEqual( Article.objects.iterator(), [ 'Article 5', 'Article 6', 'Article 4', 'Article 2', 'Article 3', 'Article 7', 'Article 1', ], transform=attrgetter('headline') ) # iterator() can be used on any QuerySet. self.assertQuerysetEqual( Article.objects.filter(headline__endswith='4').iterator(), ['Article 4'], transform=attrgetter('headline')) def test_count(self): # count() returns the number of objects matching search criteria. self.assertEqual(Article.objects.count(), 7) self.assertEqual(Article.objects.filter(pub_date__exact=datetime(2005, 7, 27)).count(), 3) self.assertEqual(Article.objects.filter(headline__startswith='Blah blah').count(), 0) # count() should respect sliced query sets. 
articles = Article.objects.all() self.assertEqual(articles.count(), 7) self.assertEqual(articles[:4].count(), 4) self.assertEqual(articles[1:100].count(), 6) self.assertEqual(articles[10:100].count(), 0) # Date and date/time lookups can also be done with strings. self.assertEqual(Article.objects.filter(pub_date__exact='2005-07-27 00:00:00').count(), 3) def test_in_bulk(self): # in_bulk() takes a list of IDs and returns a dictionary mapping IDs to objects. arts = Article.objects.in_bulk([self.a1.id, self.a2.id]) self.assertEqual(arts[self.a1.id], self.a1) self.assertEqual(arts[self.a2.id], self.a2) self.assertEqual( Article.objects.in_bulk(), { self.a1.id: self.a1, self.a2.id: self.a2, self.a3.id: self.a3, self.a4.id: self.a4, self.a5.id: self.a5, self.a6.id: self.a6, self.a7.id: self.a7, } ) self.assertEqual(Article.objects.in_bulk([self.a3.id]), {self.a3.id: self.a3}) self.assertEqual(Article.objects.in_bulk({self.a3.id}), {self.a3.id: self.a3}) self.assertEqual(Article.objects.in_bulk(frozenset([self.a3.id])), {self.a3.id: self.a3}) self.assertEqual(Article.objects.in_bulk((self.a3.id,)), {self.a3.id: self.a3}) self.assertEqual(Article.objects.in_bulk([1000]), {}) self.assertEqual(Article.objects.in_bulk([]), {}) self.assertEqual(Article.objects.in_bulk(iter([self.a1.id])), {self.a1.id: self.a1}) self.assertEqual(Article.objects.in_bulk(iter([])), {}) with self.assertRaises(TypeError): Article.objects.in_bulk(headline__startswith='Blah') def test_in_bulk_lots_of_ids(self): test_range = 2000 max_query_params = connection.features.max_query_params expected_num_queries = ceil(test_range / max_query_params) if max_query_params else 1 Author.objects.bulk_create([Author() for i in range(test_range - Author.objects.count())]) authors = {author.pk: author for author in Author.objects.all()} with self.assertNumQueries(expected_num_queries): self.assertEqual(Author.objects.in_bulk(authors), authors) def test_in_bulk_with_field(self): self.assertEqual( Article.objects.in_bulk([self.a1.slug, self.a2.slug, self.a3.slug], field_name='slug'), { self.a1.slug: self.a1, self.a2.slug: self.a2, self.a3.slug: self.a3, } ) def test_in_bulk_non_unique_field(self): msg = "in_bulk()'s field_name must be a unique field but 'author' isn't." with self.assertRaisesMessage(ValueError, msg): Article.objects.in_bulk([self.au1], field_name='author') def test_values(self): # values() returns a list of dictionaries instead of object instances -- # and you can specify which fields you want to retrieve. self.assertSequenceEqual( Article.objects.values('headline'), [ {'headline': 'Article 5'}, {'headline': 'Article 6'}, {'headline': 'Article 4'}, {'headline': 'Article 2'}, {'headline': 'Article 3'}, {'headline': 'Article 7'}, {'headline': 'Article 1'}, ], ) self.assertSequenceEqual( Article.objects.filter(pub_date__exact=datetime(2005, 7, 27)).values('id'), [{'id': self.a2.id}, {'id': self.a3.id}, {'id': self.a7.id}], ) self.assertSequenceEqual( Article.objects.values('id', 'headline'), [ {'id': self.a5.id, 'headline': 'Article 5'}, {'id': self.a6.id, 'headline': 'Article 6'}, {'id': self.a4.id, 'headline': 'Article 4'}, {'id': self.a2.id, 'headline': 'Article 2'}, {'id': self.a3.id, 'headline': 'Article 3'}, {'id': self.a7.id, 'headline': 'Article 7'}, {'id': self.a1.id, 'headline': 'Article 1'}, ], ) # You can use values() with iterator() for memory savings, # because iterator() uses database-level iteration. 
self.assertSequenceEqual( list(Article.objects.values('id', 'headline').iterator()), [ {'headline': 'Article 5', 'id': self.a5.id}, {'headline': 'Article 6', 'id': self.a6.id}, {'headline': 'Article 4', 'id': self.a4.id}, {'headline': 'Article 2', 'id': self.a2.id}, {'headline': 'Article 3', 'id': self.a3.id}, {'headline': 'Article 7', 'id': self.a7.id}, {'headline': 'Article 1', 'id': self.a1.id}, ], ) # The values() method works with "extra" fields specified in extra(select). self.assertSequenceEqual( Article.objects.extra(select={'id_plus_one': 'id + 1'}).values('id', 'id_plus_one'), [ {'id': self.a5.id, 'id_plus_one': self.a5.id + 1}, {'id': self.a6.id, 'id_plus_one': self.a6.id + 1}, {'id': self.a4.id, 'id_plus_one': self.a4.id + 1}, {'id': self.a2.id, 'id_plus_one': self.a2.id + 1}, {'id': self.a3.id, 'id_plus_one': self.a3.id + 1}, {'id': self.a7.id, 'id_plus_one': self.a7.id + 1}, {'id': self.a1.id, 'id_plus_one': self.a1.id + 1}, ], ) data = { 'id_plus_one': 'id+1', 'id_plus_two': 'id+2', 'id_plus_three': 'id+3', 'id_plus_four': 'id+4', 'id_plus_five': 'id+5', 'id_plus_six': 'id+6', 'id_plus_seven': 'id+7', 'id_plus_eight': 'id+8', } self.assertSequenceEqual( Article.objects.filter(id=self.a1.id).extra(select=data).values(*data), [{ 'id_plus_one': self.a1.id + 1, 'id_plus_two': self.a1.id + 2, 'id_plus_three': self.a1.id + 3, 'id_plus_four': self.a1.id + 4, 'id_plus_five': self.a1.id + 5, 'id_plus_six': self.a1.id + 6, 'id_plus_seven': self.a1.id + 7, 'id_plus_eight': self.a1.id + 8, }], ) # You can specify fields from forward and reverse relations, just like filter(). self.assertSequenceEqual( Article.objects.values('headline', 'author__name'), [ {'headline': self.a5.headline, 'author__name': self.au2.name}, {'headline': self.a6.headline, 'author__name': self.au2.name}, {'headline': self.a4.headline, 'author__name': self.au1.name}, {'headline': self.a2.headline, 'author__name': self.au1.name}, {'headline': self.a3.headline, 'author__name': self.au1.name}, {'headline': self.a7.headline, 'author__name': self.au2.name}, {'headline': self.a1.headline, 'author__name': self.au1.name}, ], ) self.assertSequenceEqual( Author.objects.values('name', 'article__headline').order_by('name', 'article__headline'), [ {'name': self.au1.name, 'article__headline': self.a1.headline}, {'name': self.au1.name, 'article__headline': self.a2.headline}, {'name': self.au1.name, 'article__headline': self.a3.headline}, {'name': self.au1.name, 'article__headline': self.a4.headline}, {'name': self.au2.name, 'article__headline': self.a5.headline}, {'name': self.au2.name, 'article__headline': self.a6.headline}, {'name': self.au2.name, 'article__headline': self.a7.headline}, ], ) self.assertSequenceEqual( ( Author.objects .values('name', 'article__headline', 'article__tag__name') .order_by('name', 'article__headline', 'article__tag__name') ), [ {'name': self.au1.name, 'article__headline': self.a1.headline, 'article__tag__name': self.t1.name}, {'name': self.au1.name, 'article__headline': self.a2.headline, 'article__tag__name': self.t1.name}, {'name': self.au1.name, 'article__headline': self.a3.headline, 'article__tag__name': self.t1.name}, {'name': self.au1.name, 'article__headline': self.a3.headline, 'article__tag__name': self.t2.name}, {'name': self.au1.name, 'article__headline': self.a4.headline, 'article__tag__name': self.t2.name}, {'name': self.au2.name, 'article__headline': self.a5.headline, 'article__tag__name': self.t2.name}, {'name': self.au2.name, 'article__headline': self.a5.headline, 'article__tag__name': 
self.t3.name}, {'name': self.au2.name, 'article__headline': self.a6.headline, 'article__tag__name': self.t3.name}, {'name': self.au2.name, 'article__headline': self.a7.headline, 'article__tag__name': self.t3.name}, ], ) # However, an exception FieldDoesNotExist will be thrown if you specify # a nonexistent field name in values() (a field that is neither in the # model nor in extra(select)). msg = ( "Cannot resolve keyword 'id_plus_two' into field. Choices are: " "author, author_id, headline, id, id_plus_one, pub_date, slug, tag" ) with self.assertRaisesMessage(FieldError, msg): Article.objects.extra(select={'id_plus_one': 'id + 1'}).values('id', 'id_plus_two') # If you don't specify field names to values(), all are returned. self.assertSequenceEqual( Article.objects.filter(id=self.a5.id).values(), [{ 'id': self.a5.id, 'author_id': self.au2.id, 'headline': 'Article 5', 'pub_date': datetime(2005, 8, 1, 9, 0), 'slug': 'a5', }], ) def test_values_list(self): # values_list() is similar to values(), except that the results are # returned as a list of tuples, rather than a list of dictionaries. # Within each tuple, the order of the elements is the same as the order # of fields in the values_list() call. self.assertSequenceEqual( Article.objects.values_list('headline'), [ ('Article 5',), ('Article 6',), ('Article 4',), ('Article 2',), ('Article 3',), ('Article 7',), ('Article 1',), ], ) self.assertSequenceEqual( Article.objects.values_list('id').order_by('id'), [(self.a1.id,), (self.a2.id,), (self.a3.id,), (self.a4.id,), (self.a5.id,), (self.a6.id,), (self.a7.id,)], ) self.assertSequenceEqual( Article.objects.values_list('id', flat=True).order_by('id'), [self.a1.id, self.a2.id, self.a3.id, self.a4.id, self.a5.id, self.a6.id, self.a7.id], ) self.assertSequenceEqual( Article.objects.extra(select={'id_plus_one': 'id+1'}).order_by('id').values_list('id'), [(self.a1.id,), (self.a2.id,), (self.a3.id,), (self.a4.id,), (self.a5.id,), (self.a6.id,), (self.a7.id,)], ) self.assertSequenceEqual( Article.objects.extra(select={'id_plus_one': 'id+1'}).order_by('id').values_list('id_plus_one', 'id'), [ (self.a1.id + 1, self.a1.id), (self.a2.id + 1, self.a2.id), (self.a3.id + 1, self.a3.id), (self.a4.id + 1, self.a4.id), (self.a5.id + 1, self.a5.id), (self.a6.id + 1, self.a6.id), (self.a7.id + 1, self.a7.id) ], ) self.assertSequenceEqual( Article.objects.extra(select={'id_plus_one': 'id+1'}).order_by('id').values_list('id', 'id_plus_one'), [ (self.a1.id, self.a1.id + 1), (self.a2.id, self.a2.id + 1), (self.a3.id, self.a3.id + 1), (self.a4.id, self.a4.id + 1), (self.a5.id, self.a5.id + 1), (self.a6.id, self.a6.id + 1), (self.a7.id, self.a7.id + 1) ], ) args = ('name', 'article__headline', 'article__tag__name') self.assertSequenceEqual( Author.objects.values_list(*args).order_by(*args), [ (self.au1.name, self.a1.headline, self.t1.name), (self.au1.name, self.a2.headline, self.t1.name), (self.au1.name, self.a3.headline, self.t1.name), (self.au1.name, self.a3.headline, self.t2.name), (self.au1.name, self.a4.headline, self.t2.name), (self.au2.name, self.a5.headline, self.t2.name), (self.au2.name, self.a5.headline, self.t3.name), (self.au2.name, self.a6.headline, self.t3.name), (self.au2.name, self.a7.headline, self.t3.name), ], ) with self.assertRaises(TypeError): Article.objects.values_list('id', 'headline', flat=True) def test_get_next_previous_by(self): # Every DateField and DateTimeField creates get_next_by_FOO() and # get_previous_by_FOO() methods. 
In the case of identical date values, # these methods will use the ID as a fallback check. This guarantees # that no records are skipped or duplicated. self.assertEqual(repr(self.a1.get_next_by_pub_date()), '<Article: Article 2>') self.assertEqual(repr(self.a2.get_next_by_pub_date()), '<Article: Article 3>') self.assertEqual(repr(self.a2.get_next_by_pub_date(headline__endswith='6')), '<Article: Article 6>') self.assertEqual(repr(self.a3.get_next_by_pub_date()), '<Article: Article 7>') self.assertEqual(repr(self.a4.get_next_by_pub_date()), '<Article: Article 6>') with self.assertRaises(Article.DoesNotExist): self.a5.get_next_by_pub_date() self.assertEqual(repr(self.a6.get_next_by_pub_date()), '<Article: Article 5>') self.assertEqual(repr(self.a7.get_next_by_pub_date()), '<Article: Article 4>') self.assertEqual(repr(self.a7.get_previous_by_pub_date()), '<Article: Article 3>') self.assertEqual(repr(self.a6.get_previous_by_pub_date()), '<Article: Article 4>') self.assertEqual(repr(self.a5.get_previous_by_pub_date()), '<Article: Article 6>') self.assertEqual(repr(self.a4.get_previous_by_pub_date()), '<Article: Article 7>') self.assertEqual(repr(self.a3.get_previous_by_pub_date()), '<Article: Article 2>') self.assertEqual(repr(self.a2.get_previous_by_pub_date()), '<Article: Article 1>') def test_escaping(self): # Underscores, percent signs and backslashes have special meaning in the # underlying SQL code, but Django handles the quoting of them automatically. Article.objects.create(headline='Article_ with underscore', pub_date=datetime(2005, 11, 20)) self.assertQuerysetEqual( Article.objects.filter(headline__startswith='Article'), [ '<Article: Article_ with underscore>', '<Article: Article 5>', '<Article: Article 6>', '<Article: Article 4>', '<Article: Article 2>', '<Article: Article 3>', '<Article: Article 7>', '<Article: Article 1>', ] ) self.assertQuerysetEqual( Article.objects.filter(headline__startswith='Article_'), ['<Article: Article_ with underscore>'] ) Article.objects.create(headline='Article% with percent sign', pub_date=datetime(2005, 11, 21)) self.assertQuerysetEqual( Article.objects.filter(headline__startswith='Article'), [ '<Article: Article% with percent sign>', '<Article: Article_ with underscore>', '<Article: Article 5>', '<Article: Article 6>', '<Article: Article 4>', '<Article: Article 2>', '<Article: Article 3>', '<Article: Article 7>', '<Article: Article 1>', ] ) self.assertQuerysetEqual( Article.objects.filter(headline__startswith='Article%'), ['<Article: Article% with percent sign>'] ) Article.objects.create(headline='Article with \\ backslash', pub_date=datetime(2005, 11, 22)) self.assertQuerysetEqual( Article.objects.filter(headline__contains='\\'), [r'<Article: Article with \ backslash>'] ) def test_exclude(self): Article.objects.bulk_create([ Article(headline='Article_ with underscore', pub_date=datetime(2005, 11, 20)), Article(headline='Article% with percent sign', pub_date=datetime(2005, 11, 21)), Article(headline='Article with \\ backslash', pub_date=datetime(2005, 11, 22)), ]) # exclude() is the opposite of filter() when doing lookups: self.assertQuerysetEqual( Article.objects.filter(headline__contains='Article').exclude(headline__contains='with'), [ '<Article: Article 5>', '<Article: Article 6>', '<Article: Article 4>', '<Article: Article 2>', '<Article: Article 3>', '<Article: Article 7>', '<Article: Article 1>', ] ) self.assertQuerysetEqual( Article.objects.exclude(headline__startswith="Article_"), [ '<Article: Article with \\ backslash>', '<Article: Article% 
with percent sign>', '<Article: Article 5>', '<Article: Article 6>', '<Article: Article 4>', '<Article: Article 2>', '<Article: Article 3>', '<Article: Article 7>', '<Article: Article 1>', ] ) self.assertQuerysetEqual( Article.objects.exclude(headline="Article 7"), [ '<Article: Article with \\ backslash>', '<Article: Article% with percent sign>', '<Article: Article_ with underscore>', '<Article: Article 5>', '<Article: Article 6>', '<Article: Article 4>', '<Article: Article 2>', '<Article: Article 3>', '<Article: Article 1>', ] ) def test_none(self): # none() returns a QuerySet that behaves like any other QuerySet object self.assertQuerysetEqual(Article.objects.none(), []) self.assertQuerysetEqual(Article.objects.none().filter(headline__startswith='Article'), []) self.assertQuerysetEqual(Article.objects.filter(headline__startswith='Article').none(), []) self.assertEqual(Article.objects.none().count(), 0) self.assertEqual(Article.objects.none().update(headline="This should not take effect"), 0) self.assertQuerysetEqual(Article.objects.none().iterator(), []) def test_in(self): # using __in with an empty list should return an empty query set self.assertQuerysetEqual(Article.objects.filter(id__in=[]), []) self.assertQuerysetEqual( Article.objects.exclude(id__in=[]), [ '<Article: Article 5>', '<Article: Article 6>', '<Article: Article 4>', '<Article: Article 2>', '<Article: Article 3>', '<Article: Article 7>', '<Article: Article 1>', ] ) def test_in_different_database(self): with self.assertRaisesMessage( ValueError, "Subqueries aren't allowed across different databases. Force the " "inner query to be evaluated using `list(inner_query)`." ): list(Article.objects.filter(id__in=Article.objects.using('other').all())) def test_in_keeps_value_ordering(self): query = Article.objects.filter(slug__in=['a%d' % i for i in range(1, 8)]).values('pk').query self.assertIn(' IN (a1, a2, a3, a4, a5, a6, a7) ', str(query)) def test_error_messages(self): # Programming errors are pointed out with nice error messages with self.assertRaisesMessage( FieldError, "Cannot resolve keyword 'pub_date_year' into field. Choices are: " "author, author_id, headline, id, pub_date, slug, tag" ): Article.objects.filter(pub_date_year='2005').count() def test_unsupported_lookups(self): with self.assertRaisesMessage( FieldError, "Unsupported lookup 'starts' for CharField or join on the field " "not permitted, perhaps you meant startswith or istartswith?" ): Article.objects.filter(headline__starts='Article') with self.assertRaisesMessage( FieldError, "Unsupported lookup 'is_null' for DateTimeField or join on the field " "not permitted, perhaps you meant isnull?" ): Article.objects.filter(pub_date__is_null=True) with self.assertRaisesMessage( FieldError, "Unsupported lookup 'gobbledygook' for DateTimeField or join on the field " "not permitted." ): Article.objects.filter(pub_date__gobbledygook='blahblah') def test_relation_nested_lookup_error(self): # An invalid nested lookup on a related field raises a useful error. 
msg = 'Related Field got invalid lookup: editor' with self.assertRaisesMessage(FieldError, msg): Article.objects.filter(author__editor__name='James') msg = 'Related Field got invalid lookup: foo' with self.assertRaisesMessage(FieldError, msg): Tag.objects.filter(articles__foo='bar') def test_regex(self): # Create some articles with a bit more interesting headlines for testing field lookups: for a in Article.objects.all(): a.delete() now = datetime.now() Article.objects.bulk_create([ Article(pub_date=now, headline='f'), Article(pub_date=now, headline='fo'), Article(pub_date=now, headline='foo'), Article(pub_date=now, headline='fooo'), Article(pub_date=now, headline='hey-Foo'), Article(pub_date=now, headline='bar'), Article(pub_date=now, headline='AbBa'), Article(pub_date=now, headline='baz'), Article(pub_date=now, headline='baxZ'), ]) # zero-or-more self.assertQuerysetEqual( Article.objects.filter(headline__regex=r'fo*'), ['<Article: f>', '<Article: fo>', '<Article: foo>', '<Article: fooo>'] ) self.assertQuerysetEqual( Article.objects.filter(headline__iregex=r'fo*'), [ '<Article: f>', '<Article: fo>', '<Article: foo>', '<Article: fooo>', '<Article: hey-Foo>', ] ) # one-or-more self.assertQuerysetEqual( Article.objects.filter(headline__regex=r'fo+'), ['<Article: fo>', '<Article: foo>', '<Article: fooo>'] ) # wildcard self.assertQuerysetEqual( Article.objects.filter(headline__regex=r'fooo?'), ['<Article: foo>', '<Article: fooo>'] ) # leading anchor self.assertQuerysetEqual( Article.objects.filter(headline__regex=r'^b'), ['<Article: bar>', '<Article: baxZ>', '<Article: baz>'] ) self.assertQuerysetEqual(Article.objects.filter(headline__iregex=r'^a'), ['<Article: AbBa>']) # trailing anchor self.assertQuerysetEqual(Article.objects.filter(headline__regex=r'z$'), ['<Article: baz>']) self.assertQuerysetEqual( Article.objects.filter(headline__iregex=r'z$'), ['<Article: baxZ>', '<Article: baz>'] ) # character sets self.assertQuerysetEqual( Article.objects.filter(headline__regex=r'ba[rz]'), ['<Article: bar>', '<Article: baz>'] ) self.assertQuerysetEqual(Article.objects.filter(headline__regex=r'ba.[RxZ]'), ['<Article: baxZ>']) self.assertQuerysetEqual( Article.objects.filter(headline__iregex=r'ba[RxZ]'), ['<Article: bar>', '<Article: baxZ>', '<Article: baz>'] ) # and more articles: Article.objects.bulk_create([ Article(pub_date=now, headline='foobar'), Article(pub_date=now, headline='foobaz'), Article(pub_date=now, headline='ooF'), Article(pub_date=now, headline='foobarbaz'), Article(pub_date=now, headline='zoocarfaz'), Article(pub_date=now, headline='barfoobaz'), Article(pub_date=now, headline='bazbaRFOO'), ]) # alternation self.assertQuerysetEqual( Article.objects.filter(headline__regex=r'oo(f|b)'), [ '<Article: barfoobaz>', '<Article: foobar>', '<Article: foobarbaz>', '<Article: foobaz>', ] ) self.assertQuerysetEqual( Article.objects.filter(headline__iregex=r'oo(f|b)'), [ '<Article: barfoobaz>', '<Article: foobar>', '<Article: foobarbaz>', '<Article: foobaz>', '<Article: ooF>', ] ) self.assertQuerysetEqual( Article.objects.filter(headline__regex=r'^foo(f|b)'), ['<Article: foobar>', '<Article: foobarbaz>', '<Article: foobaz>'] ) # greedy matching self.assertQuerysetEqual( Article.objects.filter(headline__regex=r'b.*az'), [ '<Article: barfoobaz>', '<Article: baz>', '<Article: bazbaRFOO>', '<Article: foobarbaz>', '<Article: foobaz>', ] ) self.assertQuerysetEqual( Article.objects.filter(headline__iregex=r'b.*ar'), [ '<Article: bar>', '<Article: barfoobaz>', '<Article: bazbaRFOO>', '<Article: foobar>', 
'<Article: foobarbaz>', ] ) @skipUnlessDBFeature('supports_regex_backreferencing') def test_regex_backreferencing(self): # grouping and backreferences now = datetime.now() Article.objects.bulk_create([ Article(pub_date=now, headline='foobar'), Article(pub_date=now, headline='foobaz'), Article(pub_date=now, headline='ooF'), Article(pub_date=now, headline='foobarbaz'), Article(pub_date=now, headline='zoocarfaz'), Article(pub_date=now, headline='barfoobaz'), Article(pub_date=now, headline='bazbaRFOO'), ]) self.assertQuerysetEqual( Article.objects.filter(headline__regex=r'b(.).*b\1'), ['<Article: barfoobaz>', '<Article: bazbaRFOO>', '<Article: foobarbaz>'] ) def test_regex_null(self): """ A regex lookup does not fail on null/None values """ Season.objects.create(year=2012, gt=None) self.assertQuerysetEqual(Season.objects.filter(gt__regex=r'^$'), []) def test_regex_non_string(self): """ A regex lookup does not fail on non-string fields """ Season.objects.create(year=2013, gt=444) self.assertQuerysetEqual(Season.objects.filter(gt__regex=r'^444$'), ['<Season: 2013>']) def test_regex_non_ascii(self): """ A regex lookup does not trip on non-ASCII characters. """ Player.objects.create(name='\u2660') Player.objects.get(name__regex='\u2660') def test_nonfield_lookups(self): """ A lookup query containing non-fields raises the proper exception. """ msg = "Unsupported lookup 'blahblah' for CharField or join on the field not permitted." with self.assertRaisesMessage(FieldError, msg): Article.objects.filter(headline__blahblah=99) with self.assertRaisesMessage(FieldError, msg): Article.objects.filter(headline__blahblah__exact=99) msg = ( "Cannot resolve keyword 'blahblah' into field. Choices are: " "author, author_id, headline, id, pub_date, slug, tag" ) with self.assertRaisesMessage(FieldError, msg): Article.objects.filter(blahblah=99) def test_lookup_collision(self): """ Genuine field names don't collide with built-in lookup types ('year', 'gt', 'range', 'in' etc.) (#11670). """ # 'gt' is used as a code number for the year, e.g. 111=>2009. season_2009 = Season.objects.create(year=2009, gt=111) season_2009.games.create(home="Houston Astros", away="St. Louis Cardinals") season_2010 = Season.objects.create(year=2010, gt=222) season_2010.games.create(home="Houston Astros", away="Chicago Cubs") season_2010.games.create(home="Houston Astros", away="Milwaukee Brewers") season_2010.games.create(home="Houston Astros", away="St. Louis Cardinals") season_2011 = Season.objects.create(year=2011, gt=333) season_2011.games.create(home="Houston Astros", away="St. 
Louis Cardinals") season_2011.games.create(home="Houston Astros", away="Milwaukee Brewers") hunter_pence = Player.objects.create(name="Hunter Pence") hunter_pence.games.set(Game.objects.filter(season__year__in=[2009, 2010])) pudge = Player.objects.create(name="Ivan Rodriquez") pudge.games.set(Game.objects.filter(season__year=2009)) pedro_feliz = Player.objects.create(name="Pedro Feliz") pedro_feliz.games.set(Game.objects.filter(season__year__in=[2011])) johnson = Player.objects.create(name="Johnson") johnson.games.set(Game.objects.filter(season__year__in=[2011])) # Games in 2010 self.assertEqual(Game.objects.filter(season__year=2010).count(), 3) self.assertEqual(Game.objects.filter(season__year__exact=2010).count(), 3) self.assertEqual(Game.objects.filter(season__gt=222).count(), 3) self.assertEqual(Game.objects.filter(season__gt__exact=222).count(), 3) # Games in 2011 self.assertEqual(Game.objects.filter(season__year=2011).count(), 2) self.assertEqual(Game.objects.filter(season__year__exact=2011).count(), 2) self.assertEqual(Game.objects.filter(season__gt=333).count(), 2) self.assertEqual(Game.objects.filter(season__gt__exact=333).count(), 2) self.assertEqual(Game.objects.filter(season__year__gt=2010).count(), 2) self.assertEqual(Game.objects.filter(season__gt__gt=222).count(), 2) # Games played in 2010 and 2011 self.assertEqual(Game.objects.filter(season__year__in=[2010, 2011]).count(), 5) self.assertEqual(Game.objects.filter(season__year__gt=2009).count(), 5) self.assertEqual(Game.objects.filter(season__gt__in=[222, 333]).count(), 5) self.assertEqual(Game.objects.filter(season__gt__gt=111).count(), 5) # Players who played in 2009 self.assertEqual(Player.objects.filter(games__season__year=2009).distinct().count(), 2) self.assertEqual(Player.objects.filter(games__season__year__exact=2009).distinct().count(), 2) self.assertEqual(Player.objects.filter(games__season__gt=111).distinct().count(), 2) self.assertEqual(Player.objects.filter(games__season__gt__exact=111).distinct().count(), 2) # Players who played in 2010 self.assertEqual(Player.objects.filter(games__season__year=2010).distinct().count(), 1) self.assertEqual(Player.objects.filter(games__season__year__exact=2010).distinct().count(), 1) self.assertEqual(Player.objects.filter(games__season__gt=222).distinct().count(), 1) self.assertEqual(Player.objects.filter(games__season__gt__exact=222).distinct().count(), 1) # Players who played in 2011 self.assertEqual(Player.objects.filter(games__season__year=2011).distinct().count(), 2) self.assertEqual(Player.objects.filter(games__season__year__exact=2011).distinct().count(), 2) self.assertEqual(Player.objects.filter(games__season__gt=333).distinct().count(), 2) self.assertEqual(Player.objects.filter(games__season__year__gt=2010).distinct().count(), 2) self.assertEqual(Player.objects.filter(games__season__gt__gt=222).distinct().count(), 2) def test_chain_date_time_lookups(self): self.assertQuerysetEqual( Article.objects.filter(pub_date__month__gt=7), ['<Article: Article 5>', '<Article: Article 6>'], ordered=False ) self.assertQuerysetEqual( Article.objects.filter(pub_date__day__gte=27), ['<Article: Article 2>', '<Article: Article 3>', '<Article: Article 4>', '<Article: Article 7>'], ordered=False ) self.assertQuerysetEqual( Article.objects.filter(pub_date__hour__lt=8), ['<Article: Article 1>', '<Article: Article 2>', '<Article: Article 3>', '<Article: Article 4>', '<Article: Article 7>'], ordered=False ) self.assertQuerysetEqual( Article.objects.filter(pub_date__minute__lte=0), ['<Article: 
Article 1>', '<Article: Article 2>', '<Article: Article 3>', '<Article: Article 4>', '<Article: Article 5>', '<Article: Article 6>', '<Article: Article 7>'], ordered=False ) def test_exact_none_transform(self): """Transforms are used for __exact=None.""" Season.objects.create(year=1, nulled_text_field='not null') self.assertFalse(Season.objects.filter(nulled_text_field__isnull=True)) self.assertTrue(Season.objects.filter(nulled_text_field__nulled__isnull=True)) self.assertTrue(Season.objects.filter(nulled_text_field__nulled__exact=None)) self.assertTrue(Season.objects.filter(nulled_text_field__nulled=None)) def test_exact_sliced_queryset_limit_one(self): self.assertCountEqual( Article.objects.filter(author=Author.objects.all()[:1]), [self.a1, self.a2, self.a3, self.a4] ) def test_exact_sliced_queryset_limit_one_offset(self): self.assertCountEqual( Article.objects.filter(author=Author.objects.all()[1:2]), [self.a5, self.a6, self.a7] ) def test_exact_sliced_queryset_not_limited_to_one(self): msg = ( 'The QuerySet value for an exact lookup must be limited to one ' 'result using slicing.' ) with self.assertRaisesMessage(ValueError, msg): list(Article.objects.filter(author=Author.objects.all()[:2])) with self.assertRaisesMessage(ValueError, msg): list(Article.objects.filter(author=Author.objects.all()[1:])) def test_custom_field_none_rhs(self): """ __exact=value is transformed to __isnull=True if Field.get_prep_value() converts value to None. """ season = Season.objects.create(year=2012, nulled_text_field=None) self.assertTrue(Season.objects.filter(pk=season.pk, nulled_text_field__isnull=True)) self.assertTrue(Season.objects.filter(pk=season.pk, nulled_text_field='')) def test_pattern_lookups_with_substr(self): a = Author.objects.create(name='John Smith', alias='Johx') b = Author.objects.create(name='Rhonda Simpson', alias='sonx') tests = ( ('startswith', [a]), ('istartswith', [a]), ('contains', [a, b]), ('icontains', [a, b]), ('endswith', [b]), ('iendswith', [b]), ) for lookup, result in tests: with self.subTest(lookup=lookup): authors = Author.objects.filter(**{'name__%s' % lookup: Substr('alias', 1, 3)}) self.assertCountEqual(authors, result) def test_custom_lookup_none_rhs(self): """Lookup.can_use_none_as_rhs=True allows None as a lookup value.""" season = Season.objects.create(year=2012, nulled_text_field=None) query = Season.objects.get_queryset().query field = query.model._meta.get_field('nulled_text_field') self.assertIsInstance(query.build_lookup(['isnull_none_rhs'], field, None), IsNullWithNoneAsRHS) self.assertTrue(Season.objects.filter(pk=season.pk, nulled_text_field__isnull_none_rhs=True)) def test_exact_exists(self): qs = Article.objects.filter(pk=OuterRef('pk')) seasons = Season.objects.annotate( pk_exists=Exists(qs), ).filter( pk_exists=Exists(qs), ) self.assertCountEqual(seasons, Season.objects.all())
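

# A minimal sketch, based on test_custom_lookup_none_rhs above, of how a
# lookup with can_use_none_as_rhs = True might be defined and registered.
# The real IsNullWithNoneAsRHS used by these tests lives in this app's
# models module and may be registered on a specific custom field rather
# than on Field generally.
from django.db.models import Field
from django.db.models.lookups import IsNull

class IsNullWithNoneAsRHS(IsNull):
    lookup_name = 'isnull_none_rhs'
    # Allow a literal None on the right-hand side instead of having the
    # query builder reject it or rewrite the lookup.
    can_use_none_as_rhs = True

Field.register_lookup(IsNullWithNoneAsRHS)
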
import copy import datetime import json import uuid from django.core.exceptions import NON_FIELD_ERRORS from django.core.files.uploadedfile import SimpleUploadedFile from django.core.validators import MaxValueValidator, RegexValidator from django.forms import ( BooleanField, CharField, CheckboxSelectMultiple, ChoiceField, DateField, DateTimeField, EmailField, FileField, FloatField, Form, HiddenInput, ImageField, IntegerField, MultipleChoiceField, MultipleHiddenInput, MultiValueField, NullBooleanField, PasswordInput, RadioSelect, Select, SplitDateTimeField, SplitHiddenDateTimeWidget, Textarea, TextInput, TimeField, ValidationError, forms, ) from django.forms.renderers import DjangoTemplates, get_default_renderer from django.forms.utils import ErrorList from django.http import QueryDict from django.template import Context, Template from django.test import SimpleTestCase from django.utils.datastructures import MultiValueDict from django.utils.safestring import mark_safe class Person(Form): first_name = CharField() last_name = CharField() birthday = DateField() class PersonNew(Form): first_name = CharField(widget=TextInput(attrs={'id': 'first_name_id'})) last_name = CharField() birthday = DateField() class MultiValueDictLike(dict): def getlist(self, key): return [self[key]] class FormsTestCase(SimpleTestCase): # A Form is a collection of Fields. It knows how to validate a set of data and it # knows how to render itself in a couple of default ways (e.g., an HTML table). # You can pass it data in __init__(), as a dictionary. def test_form(self): # Pass a dictionary to a Form's __init__(). p = Person({'first_name': 'John', 'last_name': 'Lennon', 'birthday': '1940-10-9'}) self.assertTrue(p.is_bound) self.assertEqual(p.errors, {}) self.assertTrue(p.is_valid()) self.assertHTMLEqual(p.errors.as_ul(), '') self.assertEqual(p.errors.as_text(), '') self.assertEqual(p.cleaned_data["first_name"], 'John') self.assertEqual(p.cleaned_data["last_name"], 'Lennon') self.assertEqual(p.cleaned_data["birthday"], datetime.date(1940, 10, 9)) self.assertHTMLEqual( str(p['first_name']), '<input type="text" name="first_name" value="John" id="id_first_name" required>' ) self.assertHTMLEqual( str(p['last_name']), '<input type="text" name="last_name" value="Lennon" id="id_last_name" required>' ) self.assertHTMLEqual( str(p['birthday']), '<input type="text" name="birthday" value="1940-10-9" id="id_birthday" required>' ) msg = "Key 'nonexistentfield' not found in 'Person'. Choices are: birthday, first_name, last_name." 
with self.assertRaisesMessage(KeyError, msg): p['nonexistentfield'] form_output = [] for boundfield in p: form_output.append(str(boundfield)) self.assertHTMLEqual( '\n'.join(form_output), """<input type="text" name="first_name" value="John" id="id_first_name" required> <input type="text" name="last_name" value="Lennon" id="id_last_name" required> <input type="text" name="birthday" value="1940-10-9" id="id_birthday" required>""" ) form_output = [] for boundfield in p: form_output.append([boundfield.label, boundfield.data]) self.assertEqual(form_output, [ ['First name', 'John'], ['Last name', 'Lennon'], ['Birthday', '1940-10-9'] ]) self.assertHTMLEqual( str(p), """<tr><th><label for="id_first_name">First name:</label></th><td> <input type="text" name="first_name" value="John" id="id_first_name" required></td></tr> <tr><th><label for="id_last_name">Last name:</label></th><td> <input type="text" name="last_name" value="Lennon" id="id_last_name" required></td></tr> <tr><th><label for="id_birthday">Birthday:</label></th><td> <input type="text" name="birthday" value="1940-10-9" id="id_birthday" required></td></tr>""" ) def test_empty_dict(self): # Empty dictionaries are valid, too. p = Person({}) self.assertTrue(p.is_bound) self.assertEqual(p.errors['first_name'], ['This field is required.']) self.assertEqual(p.errors['last_name'], ['This field is required.']) self.assertEqual(p.errors['birthday'], ['This field is required.']) self.assertFalse(p.is_valid()) self.assertEqual(p.cleaned_data, {}) self.assertHTMLEqual( str(p), """<tr><th><label for="id_first_name">First name:</label></th><td> <ul class="errorlist"><li>This field is required.</li></ul> <input type="text" name="first_name" id="id_first_name" required></td></tr> <tr><th><label for="id_last_name">Last name:</label></th> <td><ul class="errorlist"><li>This field is required.</li></ul> <input type="text" name="last_name" id="id_last_name" required></td></tr> <tr><th><label for="id_birthday">Birthday:</label></th><td> <ul class="errorlist"><li>This field is required.</li></ul> <input type="text" name="birthday" id="id_birthday" required></td></tr>""" ) self.assertHTMLEqual( p.as_table(), """<tr><th><label for="id_first_name">First name:</label></th><td> <ul class="errorlist"><li>This field is required.</li></ul> <input type="text" name="first_name" id="id_first_name" required></td></tr> <tr><th><label for="id_last_name">Last name:</label></th> <td><ul class="errorlist"><li>This field is required.</li></ul> <input type="text" name="last_name" id="id_last_name" required></td></tr> <tr><th><label for="id_birthday">Birthday:</label></th> <td><ul class="errorlist"><li>This field is required.</li></ul> <input type="text" name="birthday" id="id_birthday" required></td></tr>""" ) self.assertHTMLEqual( p.as_ul(), """<li><ul class="errorlist"><li>This field is required.</li></ul> <label for="id_first_name">First name:</label> <input type="text" name="first_name" id="id_first_name" required></li> <li><ul class="errorlist"><li>This field is required.</li></ul> <label for="id_last_name">Last name:</label> <input type="text" name="last_name" id="id_last_name" required></li> <li><ul class="errorlist"><li>This field is required.</li></ul> <label for="id_birthday">Birthday:</label> <input type="text" name="birthday" id="id_birthday" required></li>""" ) self.assertHTMLEqual( p.as_p(), """<ul class="errorlist"><li>This field is required.</li></ul> <p><label for="id_first_name">First name:</label> <input type="text" name="first_name" id="id_first_name" 
required></p> <ul class="errorlist"><li>This field is required.</li></ul> <p><label for="id_last_name">Last name:</label> <input type="text" name="last_name" id="id_last_name" required></p> <ul class="errorlist"><li>This field is required.</li></ul> <p><label for="id_birthday">Birthday:</label> <input type="text" name="birthday" id="id_birthday" required></p>""" ) def test_empty_querydict_args(self): data = QueryDict() files = QueryDict() p = Person(data, files) self.assertIs(p.data, data) self.assertIs(p.files, files) def test_unbound_form(self): # If you don't pass any values to the Form's __init__(), or if you pass None, # the Form will be considered unbound and won't do any validation. Form.errors # will be an empty dictionary *but* Form.is_valid() will return False. p = Person() self.assertFalse(p.is_bound) self.assertEqual(p.errors, {}) self.assertFalse(p.is_valid()) with self.assertRaises(AttributeError): p.cleaned_data self.assertHTMLEqual( str(p), """<tr><th><label for="id_first_name">First name:</label></th><td> <input type="text" name="first_name" id="id_first_name" required></td></tr> <tr><th><label for="id_last_name">Last name:</label></th><td> <input type="text" name="last_name" id="id_last_name" required></td></tr> <tr><th><label for="id_birthday">Birthday:</label></th><td> <input type="text" name="birthday" id="id_birthday" required></td></tr>""" ) self.assertHTMLEqual( p.as_table(), """<tr><th><label for="id_first_name">First name:</label></th><td> <input type="text" name="first_name" id="id_first_name" required></td></tr> <tr><th><label for="id_last_name">Last name:</label></th><td> <input type="text" name="last_name" id="id_last_name" required></td></tr> <tr><th><label for="id_birthday">Birthday:</label></th><td> <input type="text" name="birthday" id="id_birthday" required></td></tr>""" ) self.assertHTMLEqual( p.as_ul(), """<li><label for="id_first_name">First name:</label> <input type="text" name="first_name" id="id_first_name" required></li> <li><label for="id_last_name">Last name:</label> <input type="text" name="last_name" id="id_last_name" required></li> <li><label for="id_birthday">Birthday:</label> <input type="text" name="birthday" id="id_birthday" required></li>""" ) self.assertHTMLEqual( p.as_p(), """<p><label for="id_first_name">First name:</label> <input type="text" name="first_name" id="id_first_name" required></p> <p><label for="id_last_name">Last name:</label> <input type="text" name="last_name" id="id_last_name" required></p> <p><label for="id_birthday">Birthday:</label> <input type="text" name="birthday" id="id_birthday" required></p>""" ) def test_unicode_values(self): # Unicode values are handled properly. 
p = Person({ 'first_name': 'John', 'last_name': '\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111', 'birthday': '1940-10-9' }) self.assertHTMLEqual( p.as_table(), '<tr><th><label for="id_first_name">First name:</label></th><td>' '<input type="text" name="first_name" value="John" id="id_first_name" required></td></tr>\n' '<tr><th><label for="id_last_name">Last name:</label>' '</th><td><input type="text" name="last_name" ' 'value="\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111"' 'id="id_last_name" required></td></tr>\n' '<tr><th><label for="id_birthday">Birthday:</label></th><td>' '<input type="text" name="birthday" value="1940-10-9" id="id_birthday" required></td></tr>' ) self.assertHTMLEqual( p.as_ul(), '<li><label for="id_first_name">First name:</label> ' '<input type="text" name="first_name" value="John" id="id_first_name" required></li>\n' '<li><label for="id_last_name">Last name:</label> ' '<input type="text" name="last_name" ' 'value="\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111" id="id_last_name" required></li>\n' '<li><label for="id_birthday">Birthday:</label> ' '<input type="text" name="birthday" value="1940-10-9" id="id_birthday" required></li>' ) self.assertHTMLEqual( p.as_p(), '<p><label for="id_first_name">First name:</label> ' '<input type="text" name="first_name" value="John" id="id_first_name" required></p>\n' '<p><label for="id_last_name">Last name:</label> ' '<input type="text" name="last_name" ' 'value="\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111" id="id_last_name" required></p>\n' '<p><label for="id_birthday">Birthday:</label> ' '<input type="text" name="birthday" value="1940-10-9" id="id_birthday" required></p>' ) p = Person({'last_name': 'Lennon'}) self.assertEqual(p.errors['first_name'], ['This field is required.']) self.assertEqual(p.errors['birthday'], ['This field is required.']) self.assertFalse(p.is_valid()) self.assertEqual( p.errors, {'birthday': ['This field is required.'], 'first_name': ['This field is required.']} ) self.assertEqual(p.cleaned_data, {'last_name': 'Lennon'}) self.assertEqual(p['first_name'].errors, ['This field is required.']) self.assertHTMLEqual( p['first_name'].errors.as_ul(), '<ul class="errorlist"><li>This field is required.</li></ul>' ) self.assertEqual(p['first_name'].errors.as_text(), '* This field is required.') p = Person() self.assertHTMLEqual( str(p['first_name']), '<input type="text" name="first_name" id="id_first_name" required>', ) self.assertHTMLEqual(str(p['last_name']), '<input type="text" name="last_name" id="id_last_name" required>') self.assertHTMLEqual(str(p['birthday']), '<input type="text" name="birthday" id="id_birthday" required>') def test_cleaned_data_only_fields(self): # cleaned_data will always *only* contain a key for fields defined in the # Form, even if you pass extra data when you define the Form. In this # example, we pass a bunch of extra fields to the form constructor, # but cleaned_data contains only the form's fields. data = { 'first_name': 'John', 'last_name': 'Lennon', 'birthday': '1940-10-9', 'extra1': 'hello', 'extra2': 'hello', } p = Person(data) self.assertTrue(p.is_valid()) self.assertEqual(p.cleaned_data['first_name'], 'John') self.assertEqual(p.cleaned_data['last_name'], 'Lennon') self.assertEqual(p.cleaned_data['birthday'], datetime.date(1940, 10, 9)) def test_optional_data(self): # cleaned_data will include a key and value for *all* fields defined in the Form, # even if the Form's data didn't include a value for fields that are not # required. 
In this example, the data dictionary doesn't include a value for the # "nick_name" field, but cleaned_data includes it. For CharFields, it's set to the # empty string. class OptionalPersonForm(Form): first_name = CharField() last_name = CharField() nick_name = CharField(required=False) data = {'first_name': 'John', 'last_name': 'Lennon'} f = OptionalPersonForm(data) self.assertTrue(f.is_valid()) self.assertEqual(f.cleaned_data['nick_name'], '') self.assertEqual(f.cleaned_data['first_name'], 'John') self.assertEqual(f.cleaned_data['last_name'], 'Lennon') # For DateFields, it's set to None. class OptionalPersonForm(Form): first_name = CharField() last_name = CharField() birth_date = DateField(required=False) data = {'first_name': 'John', 'last_name': 'Lennon'} f = OptionalPersonForm(data) self.assertTrue(f.is_valid()) self.assertIsNone(f.cleaned_data['birth_date']) self.assertEqual(f.cleaned_data['first_name'], 'John') self.assertEqual(f.cleaned_data['last_name'], 'Lennon') def test_auto_id(self): # "auto_id" tells the Form to add an "id" attribute to each form element. # If it's a string that contains '%s', Django will use that as a format string # into which the field's name will be inserted. It will also put a <label> around # the human-readable labels for a field. p = Person(auto_id='%s_id') self.assertHTMLEqual( p.as_table(), """<tr><th><label for="first_name_id">First name:</label></th><td> <input type="text" name="first_name" id="first_name_id" required></td></tr> <tr><th><label for="last_name_id">Last name:</label></th><td> <input type="text" name="last_name" id="last_name_id" required></td></tr> <tr><th><label for="birthday_id">Birthday:</label></th><td> <input type="text" name="birthday" id="birthday_id" required></td></tr>""" ) self.assertHTMLEqual( p.as_ul(), """<li><label for="first_name_id">First name:</label> <input type="text" name="first_name" id="first_name_id" required></li> <li><label for="last_name_id">Last name:</label> <input type="text" name="last_name" id="last_name_id" required></li> <li><label for="birthday_id">Birthday:</label> <input type="text" name="birthday" id="birthday_id" required></li>""" ) self.assertHTMLEqual( p.as_p(), """<p><label for="first_name_id">First name:</label> <input type="text" name="first_name" id="first_name_id" required></p> <p><label for="last_name_id">Last name:</label> <input type="text" name="last_name" id="last_name_id" required></p> <p><label for="birthday_id">Birthday:</label> <input type="text" name="birthday" id="birthday_id" required></p>""" ) def test_auto_id_true(self): # If auto_id is any True value whose str() does not contain '%s', the "id" # attribute will be the name of the field. p = Person(auto_id=True) self.assertHTMLEqual( p.as_ul(), """<li><label for="first_name">First name:</label> <input type="text" name="first_name" id="first_name" required></li> <li><label for="last_name">Last name:</label> <input type="text" name="last_name" id="last_name" required></li> <li><label for="birthday">Birthday:</label> <input type="text" name="birthday" id="birthday" required></li>""" ) def test_auto_id_false(self): # If auto_id is any False value, an "id" attribute won't be output unless it # was manually entered. 
p = Person(auto_id=False) self.assertHTMLEqual( p.as_ul(), """<li>First name: <input type="text" name="first_name" required></li> <li>Last name: <input type="text" name="last_name" required></li> <li>Birthday: <input type="text" name="birthday" required></li>""" ) def test_id_on_field(self): # In this example, auto_id is False, but the "id" attribute for the "first_name" # field is given. Also note that field gets a <label>, while the others don't. p = PersonNew(auto_id=False) self.assertHTMLEqual( p.as_ul(), """<li><label for="first_name_id">First name:</label> <input type="text" id="first_name_id" name="first_name" required></li> <li>Last name: <input type="text" name="last_name" required></li> <li>Birthday: <input type="text" name="birthday" required></li>""" ) def test_auto_id_on_form_and_field(self): # If the "id" attribute is specified in the Form and auto_id is True, the "id" # attribute in the Form gets precedence. p = PersonNew(auto_id=True) self.assertHTMLEqual( p.as_ul(), """<li><label for="first_name_id">First name:</label> <input type="text" id="first_name_id" name="first_name" required></li> <li><label for="last_name">Last name:</label> <input type="text" name="last_name" id="last_name" required></li> <li><label for="birthday">Birthday:</label> <input type="text" name="birthday" id="birthday" required></li>""" ) def test_various_boolean_values(self): class SignupForm(Form): email = EmailField() get_spam = BooleanField() f = SignupForm(auto_id=False) self.assertHTMLEqual(str(f['email']), '<input type="email" name="email" required>') self.assertHTMLEqual(str(f['get_spam']), '<input type="checkbox" name="get_spam" required>') f = SignupForm({'email': '[email protected]', 'get_spam': True}, auto_id=False) self.assertHTMLEqual(str(f['email']), '<input type="email" name="email" value="[email protected]" required>') self.assertHTMLEqual( str(f['get_spam']), '<input checked type="checkbox" name="get_spam" required>', ) # 'True' or 'true' should be rendered without a value attribute f = SignupForm({'email': '[email protected]', 'get_spam': 'True'}, auto_id=False) self.assertHTMLEqual( str(f['get_spam']), '<input checked type="checkbox" name="get_spam" required>', ) f = SignupForm({'email': '[email protected]', 'get_spam': 'true'}, auto_id=False) self.assertHTMLEqual( str(f['get_spam']), '<input checked type="checkbox" name="get_spam" required>') # A value of 'False' or 'false' should be rendered unchecked f = SignupForm({'email': '[email protected]', 'get_spam': 'False'}, auto_id=False) self.assertHTMLEqual(str(f['get_spam']), '<input type="checkbox" name="get_spam" required>') f = SignupForm({'email': '[email protected]', 'get_spam': 'false'}, auto_id=False) self.assertHTMLEqual(str(f['get_spam']), '<input type="checkbox" name="get_spam" required>') # A value of '0' should be interpreted as a True value (#16820) f = SignupForm({'email': '[email protected]', 'get_spam': '0'}) self.assertTrue(f.is_valid()) self.assertTrue(f.cleaned_data.get('get_spam')) def test_widget_output(self): # Any Field can have a Widget class passed to its constructor: class ContactForm(Form): subject = CharField() message = CharField(widget=Textarea) f = ContactForm(auto_id=False) self.assertHTMLEqual(str(f['subject']), '<input type="text" name="subject" required>') self.assertHTMLEqual(str(f['message']), '<textarea name="message" rows="10" cols="40" required></textarea>') # as_textarea(), as_text() and as_hidden() are shortcuts for changing the output # widget type: self.assertHTMLEqual( 
f['subject'].as_textarea(), '<textarea name="subject" rows="10" cols="40" required></textarea>', ) self.assertHTMLEqual(f['message'].as_text(), '<input type="text" name="message" required>') self.assertHTMLEqual(f['message'].as_hidden(), '<input type="hidden" name="message">') # The 'widget' parameter to a Field can also be an instance: class ContactForm(Form): subject = CharField() message = CharField(widget=Textarea(attrs={'rows': 80, 'cols': 20})) f = ContactForm(auto_id=False) self.assertHTMLEqual(str(f['message']), '<textarea name="message" rows="80" cols="20" required></textarea>') # Instance-level attrs are *not* carried over to as_textarea(), as_text() and # as_hidden(): self.assertHTMLEqual(f['message'].as_text(), '<input type="text" name="message" required>') f = ContactForm({'subject': 'Hello', 'message': 'I love you.'}, auto_id=False) self.assertHTMLEqual( f['subject'].as_textarea(), '<textarea rows="10" cols="40" name="subject" required>Hello</textarea>' ) self.assertHTMLEqual( f['message'].as_text(), '<input type="text" name="message" value="I love you." required>', ) self.assertHTMLEqual(f['message'].as_hidden(), '<input type="hidden" name="message" value="I love you.">') def test_forms_with_choices(self): # For a form with a <select>, use ChoiceField: class FrameworkForm(Form): name = CharField() language = ChoiceField(choices=[('P', 'Python'), ('J', 'Java')]) f = FrameworkForm(auto_id=False) self.assertHTMLEqual(str(f['language']), """<select name="language"> <option value="P">Python</option> <option value="J">Java</option> </select>""") f = FrameworkForm({'name': 'Django', 'language': 'P'}, auto_id=False) self.assertHTMLEqual(str(f['language']), """<select name="language"> <option value="P" selected>Python</option> <option value="J">Java</option> </select>""") # A subtlety: If one of the choices' value is the empty string and the form is # unbound, then the <option> for the empty-string choice will get selected. class FrameworkForm(Form): name = CharField() language = ChoiceField(choices=[('', '------'), ('P', 'Python'), ('J', 'Java')]) f = FrameworkForm(auto_id=False) self.assertHTMLEqual(str(f['language']), """<select name="language" required> <option value="" selected>------</option> <option value="P">Python</option> <option value="J">Java</option> </select>""") # You can specify widget attributes in the Widget constructor. class FrameworkForm(Form): name = CharField() language = ChoiceField(choices=[('P', 'Python'), ('J', 'Java')], widget=Select(attrs={'class': 'foo'})) f = FrameworkForm(auto_id=False) self.assertHTMLEqual(str(f['language']), """<select class="foo" name="language"> <option value="P">Python</option> <option value="J">Java</option> </select>""") f = FrameworkForm({'name': 'Django', 'language': 'P'}, auto_id=False) self.assertHTMLEqual(str(f['language']), """<select class="foo" name="language"> <option value="P" selected>Python</option> <option value="J">Java</option> </select>""") # When passing a custom widget instance to ChoiceField, note that setting # 'choices' on the widget is meaningless. The widget will use the choices # defined on the Field, not the ones defined on the Widget. 
class FrameworkForm(Form): name = CharField() language = ChoiceField( choices=[('P', 'Python'), ('J', 'Java')], widget=Select(choices=[('R', 'Ruby'), ('P', 'Perl')], attrs={'class': 'foo'}), ) f = FrameworkForm(auto_id=False) self.assertHTMLEqual(str(f['language']), """<select class="foo" name="language"> <option value="P">Python</option> <option value="J">Java</option> </select>""") f = FrameworkForm({'name': 'Django', 'language': 'P'}, auto_id=False) self.assertHTMLEqual(str(f['language']), """<select class="foo" name="language"> <option value="P" selected>Python</option> <option value="J">Java</option> </select>""") # You can set a ChoiceField's choices after the fact. class FrameworkForm(Form): name = CharField() language = ChoiceField() f = FrameworkForm(auto_id=False) self.assertHTMLEqual(str(f['language']), """<select name="language"> </select>""") f.fields['language'].choices = [('P', 'Python'), ('J', 'Java')] self.assertHTMLEqual(str(f['language']), """<select name="language"> <option value="P">Python</option> <option value="J">Java</option> </select>""") def test_forms_with_radio(self): # Add widget=RadioSelect to use that widget with a ChoiceField. class FrameworkForm(Form): name = CharField() language = ChoiceField(choices=[('P', 'Python'), ('J', 'Java')], widget=RadioSelect) f = FrameworkForm(auto_id=False) self.assertHTMLEqual(str(f['language']), """<ul> <li><label><input type="radio" name="language" value="P" required> Python</label></li> <li><label><input type="radio" name="language" value="J" required> Java</label></li> </ul>""") self.assertHTMLEqual(f.as_table(), """<tr><th>Name:</th><td><input type="text" name="name" required></td></tr> <tr><th>Language:</th><td><ul> <li><label><input type="radio" name="language" value="P" required> Python</label></li> <li><label><input type="radio" name="language" value="J" required> Java</label></li> </ul></td></tr>""") self.assertHTMLEqual(f.as_ul(), """<li>Name: <input type="text" name="name" required></li> <li>Language: <ul> <li><label><input type="radio" name="language" value="P" required> Python</label></li> <li><label><input type="radio" name="language" value="J" required> Java</label></li> </ul></li>""") # Regarding auto_id and <label>, RadioSelect is a special case. Each radio button # gets a distinct ID, formed by appending an underscore plus the button's # zero-based index. f = FrameworkForm(auto_id='id_%s') self.assertHTMLEqual( str(f['language']), """<ul id="id_language"> <li><label for="id_language_0"><input type="radio" id="id_language_0" value="P" name="language" required> Python</label></li> <li><label for="id_language_1"><input type="radio" id="id_language_1" value="J" name="language" required> Java</label></li> </ul>""" ) # When RadioSelect is used with auto_id, and the whole form is printed using # either as_table() or as_ul(), the label for the RadioSelect will point to the # ID of the *first* radio button. 
self.assertHTMLEqual( f.as_table(), """<tr><th><label for="id_name">Name:</label></th><td><input type="text" name="name" id="id_name" required></td></tr> <tr><th><label for="id_language_0">Language:</label></th><td><ul id="id_language"> <li><label for="id_language_0"><input type="radio" id="id_language_0" value="P" name="language" required> Python</label></li> <li><label for="id_language_1"><input type="radio" id="id_language_1" value="J" name="language" required> Java</label></li> </ul></td></tr>""" ) self.assertHTMLEqual( f.as_ul(), """<li><label for="id_name">Name:</label> <input type="text" name="name" id="id_name" required></li> <li><label for="id_language_0">Language:</label> <ul id="id_language"> <li><label for="id_language_0"><input type="radio" id="id_language_0" value="P" name="language" required> Python</label></li> <li><label for="id_language_1"><input type="radio" id="id_language_1" value="J" name="language" required> Java</label></li> </ul></li>""" ) self.assertHTMLEqual( f.as_p(), """<p><label for="id_name">Name:</label> <input type="text" name="name" id="id_name" required></p> <p><label for="id_language_0">Language:</label> <ul id="id_language"> <li><label for="id_language_0"><input type="radio" id="id_language_0" value="P" name="language" required> Python</label></li> <li><label for="id_language_1"><input type="radio" id="id_language_1" value="J" name="language" required> Java</label></li> </ul></p>""" ) # Test iterating on individual radios in a template t = Template('{% for radio in form.language %}<div class="myradio">{{ radio }}</div>{% endfor %}') self.assertHTMLEqual( t.render(Context({'form': f})), """<div class="myradio"><label for="id_language_0"> <input id="id_language_0" name="language" type="radio" value="P" required> Python</label></div> <div class="myradio"><label for="id_language_1"> <input id="id_language_1" name="language" type="radio" value="J" required> Java</label></div>""" ) def test_form_with_iterable_boundfield(self): class BeatleForm(Form): name = ChoiceField( choices=[('john', 'John'), ('paul', 'Paul'), ('george', 'George'), ('ringo', 'Ringo')], widget=RadioSelect, ) f = BeatleForm(auto_id=False) self.assertHTMLEqual( '\n'.join(str(bf) for bf in f['name']), """<label><input type="radio" name="name" value="john" required> John</label> <label><input type="radio" name="name" value="paul" required> Paul</label> <label><input type="radio" name="name" value="george" required> George</label> <label><input type="radio" name="name" value="ringo" required> Ringo</label>""" ) self.assertHTMLEqual( '\n'.join('<div>%s</div>' % bf for bf in f['name']), """<div><label><input type="radio" name="name" value="john" required> John</label></div> <div><label><input type="radio" name="name" value="paul" required> Paul</label></div> <div><label><input type="radio" name="name" value="george" required> George</label></div> <div><label><input type="radio" name="name" value="ringo" required> Ringo</label></div>""" ) def test_form_with_iterable_boundfield_id(self): class BeatleForm(Form): name = ChoiceField( choices=[('john', 'John'), ('paul', 'Paul'), ('george', 'George'), ('ringo', 'Ringo')], widget=RadioSelect, ) fields = list(BeatleForm()['name']) self.assertEqual(len(fields), 4) self.assertEqual(fields[0].id_for_label, 'id_name_0') self.assertEqual(fields[0].choice_label, 'John') self.assertHTMLEqual( fields[0].tag(), '<input type="radio" name="name" value="john" id="id_name_0" required>' ) self.assertHTMLEqual( str(fields[0]), '<label for="id_name_0"><input type="radio" 
name="name" ' 'value="john" id="id_name_0" required> John</label>' ) self.assertEqual(fields[1].id_for_label, 'id_name_1') self.assertEqual(fields[1].choice_label, 'Paul') self.assertHTMLEqual( fields[1].tag(), '<input type="radio" name="name" value="paul" id="id_name_1" required>' ) self.assertHTMLEqual( str(fields[1]), '<label for="id_name_1"><input type="radio" name="name" ' 'value="paul" id="id_name_1" required> Paul</label>' ) def test_iterable_boundfield_select(self): class BeatleForm(Form): name = ChoiceField(choices=[('john', 'John'), ('paul', 'Paul'), ('george', 'George'), ('ringo', 'Ringo')]) fields = list(BeatleForm(auto_id=False)['name']) self.assertEqual(len(fields), 4) self.assertEqual(fields[0].id_for_label, 'id_name_0') self.assertEqual(fields[0].choice_label, 'John') self.assertHTMLEqual(fields[0].tag(), '<option value="john">John</option>') self.assertHTMLEqual(str(fields[0]), '<option value="john">John</option>') def test_form_with_noniterable_boundfield(self): # You can iterate over any BoundField, not just those with widget=RadioSelect. class BeatleForm(Form): name = CharField() f = BeatleForm(auto_id=False) self.assertHTMLEqual('\n'.join(str(bf) for bf in f['name']), '<input type="text" name="name" required>') def test_boundfield_slice(self): class BeatleForm(Form): name = ChoiceField( choices=[('john', 'John'), ('paul', 'Paul'), ('george', 'George'), ('ringo', 'Ringo')], widget=RadioSelect, ) f = BeatleForm() bf = f['name'] self.assertEqual( [str(item) for item in bf[1:]], [str(bf[1]), str(bf[2]), str(bf[3])], ) def test_boundfield_invalid_index(self): class TestForm(Form): name = ChoiceField(choices=[]) field = TestForm()['name'] msg = 'BoundField indices must be integers or slices, not str.' with self.assertRaisesMessage(TypeError, msg): field['foo'] def test_boundfield_bool(self): """BoundField without any choices (subwidgets) evaluates to True.""" class TestForm(Form): name = ChoiceField(choices=[]) self.assertIs(bool(TestForm()['name']), True) def test_forms_with_multiple_choice(self): # MultipleChoiceField is a special case, as its data is required to be a list: class SongForm(Form): name = CharField() composers = MultipleChoiceField() f = SongForm(auto_id=False) self.assertHTMLEqual(str(f['composers']), """<select multiple name="composers" required> </select>""") class SongForm(Form): name = CharField() composers = MultipleChoiceField(choices=[('J', 'John Lennon'), ('P', 'Paul McCartney')]) f = SongForm(auto_id=False) self.assertHTMLEqual(str(f['composers']), """<select multiple name="composers" required> <option value="J">John Lennon</option> <option value="P">Paul McCartney</option> </select>""") f = SongForm({'name': 'Yesterday', 'composers': ['P']}, auto_id=False) self.assertHTMLEqual(str(f['name']), '<input type="text" name="name" value="Yesterday" required>') self.assertHTMLEqual(str(f['composers']), """<select multiple name="composers" required> <option value="J">John Lennon</option> <option value="P" selected>Paul McCartney</option> </select>""") def test_form_with_disabled_fields(self): class PersonForm(Form): name = CharField() birthday = DateField(disabled=True) class PersonFormFieldInitial(Form): name = CharField() birthday = DateField(disabled=True, initial=datetime.date(1974, 8, 16)) # Disabled fields are generally not transmitted by user agents. # The value from the form's initial data is used. 
f1 = PersonForm({'name': 'John Doe'}, initial={'birthday': datetime.date(1974, 8, 16)}) f2 = PersonFormFieldInitial({'name': 'John Doe'}) for form in (f1, f2): self.assertTrue(form.is_valid()) self.assertEqual( form.cleaned_data, {'birthday': datetime.date(1974, 8, 16), 'name': 'John Doe'} ) # Values provided in the form's data are ignored. data = {'name': 'John Doe', 'birthday': '1984-11-10'} f1 = PersonForm(data, initial={'birthday': datetime.date(1974, 8, 16)}) f2 = PersonFormFieldInitial(data) for form in (f1, f2): self.assertTrue(form.is_valid()) self.assertEqual( form.cleaned_data, {'birthday': datetime.date(1974, 8, 16), 'name': 'John Doe'} ) # Initial data remains present on invalid forms. data = {} f1 = PersonForm(data, initial={'birthday': datetime.date(1974, 8, 16)}) f2 = PersonFormFieldInitial(data) for form in (f1, f2): self.assertFalse(form.is_valid()) self.assertEqual(form['birthday'].value(), datetime.date(1974, 8, 16)) def test_hidden_data(self): class SongForm(Form): name = CharField() composers = MultipleChoiceField(choices=[('J', 'John Lennon'), ('P', 'Paul McCartney')]) # MultipleChoiceField rendered as_hidden() is a special case. Because it can # have multiple values, its as_hidden() renders multiple <input type="hidden"> # tags. f = SongForm({'name': 'Yesterday', 'composers': ['P']}, auto_id=False) self.assertHTMLEqual(f['composers'].as_hidden(), '<input type="hidden" name="composers" value="P">') f = SongForm({'name': 'From Me To You', 'composers': ['P', 'J']}, auto_id=False) self.assertHTMLEqual(f['composers'].as_hidden(), """<input type="hidden" name="composers" value="P"> <input type="hidden" name="composers" value="J">""") # DateTimeField rendered as_hidden() is special too class MessageForm(Form): when = SplitDateTimeField() f = MessageForm({'when_0': '1992-01-01', 'when_1': '01:01'}) self.assertTrue(f.is_valid()) self.assertHTMLEqual( str(f['when']), '<input type="text" name="when_0" value="1992-01-01" id="id_when_0" required>' '<input type="text" name="when_1" value="01:01" id="id_when_1" required>' ) self.assertHTMLEqual( f['when'].as_hidden(), '<input type="hidden" name="when_0" value="1992-01-01" id="id_when_0">' '<input type="hidden" name="when_1" value="01:01" id="id_when_1">' ) def test_multiple_choice_checkbox(self): # MultipleChoiceField can also be used with the CheckboxSelectMultiple widget. 
class SongForm(Form): name = CharField() composers = MultipleChoiceField( choices=[('J', 'John Lennon'), ('P', 'Paul McCartney')], widget=CheckboxSelectMultiple, ) f = SongForm(auto_id=False) self.assertHTMLEqual(str(f['composers']), """<ul> <li><label><input type="checkbox" name="composers" value="J"> John Lennon</label></li> <li><label><input type="checkbox" name="composers" value="P"> Paul McCartney</label></li> </ul>""") f = SongForm({'composers': ['J']}, auto_id=False) self.assertHTMLEqual(str(f['composers']), """<ul> <li><label><input checked type="checkbox" name="composers" value="J"> John Lennon</label></li> <li><label><input type="checkbox" name="composers" value="P"> Paul McCartney</label></li> </ul>""") f = SongForm({'composers': ['J', 'P']}, auto_id=False) self.assertHTMLEqual(str(f['composers']), """<ul> <li><label><input checked type="checkbox" name="composers" value="J"> John Lennon</label></li> <li><label><input checked type="checkbox" name="composers" value="P"> Paul McCartney</label></li> </ul>""") # Test iterating on individual checkboxes in a template t = Template('{% for checkbox in form.composers %}<div class="mycheckbox">{{ checkbox }}</div>{% endfor %}') self.assertHTMLEqual(t.render(Context({'form': f})), """<div class="mycheckbox"><label> <input checked name="composers" type="checkbox" value="J"> John Lennon</label></div> <div class="mycheckbox"><label> <input checked name="composers" type="checkbox" value="P"> Paul McCartney</label></div>""") def test_checkbox_auto_id(self): # Regarding auto_id, CheckboxSelectMultiple is a special case. Each checkbox # gets a distinct ID, formed by appending an underscore plus the checkbox's # zero-based index. class SongForm(Form): name = CharField() composers = MultipleChoiceField( choices=[('J', 'John Lennon'), ('P', 'Paul McCartney')], widget=CheckboxSelectMultiple, ) f = SongForm(auto_id='%s_id') self.assertHTMLEqual( str(f['composers']), """<ul id="composers_id"> <li><label for="composers_id_0"> <input type="checkbox" name="composers" value="J" id="composers_id_0"> John Lennon</label></li> <li><label for="composers_id_1"> <input type="checkbox" name="composers" value="P" id="composers_id_1"> Paul McCartney</label></li> </ul>""" ) def test_multiple_choice_list_data(self): # Data for a MultipleChoiceField should be a list. QueryDict and # MultiValueDict conveniently work with this. class SongForm(Form): name = CharField() composers = MultipleChoiceField( choices=[('J', 'John Lennon'), ('P', 'Paul McCartney')], widget=CheckboxSelectMultiple, ) data = {'name': 'Yesterday', 'composers': ['J', 'P']} f = SongForm(data) self.assertEqual(f.errors, {}) data = QueryDict('name=Yesterday&composers=J&composers=P') f = SongForm(data) self.assertEqual(f.errors, {}) data = MultiValueDict({'name': ['Yesterday'], 'composers': ['J', 'P']}) f = SongForm(data) self.assertEqual(f.errors, {}) # SelectMultiple uses ducktyping so that MultiValueDictLike.getlist() # is called. f = SongForm(MultiValueDictLike({'name': 'Yesterday', 'composers': 'J'})) self.assertEqual(f.errors, {}) self.assertEqual(f.cleaned_data['composers'], ['J']) def test_multiple_hidden(self): class SongForm(Form): name = CharField() composers = MultipleChoiceField( choices=[('J', 'John Lennon'), ('P', 'Paul McCartney')], widget=CheckboxSelectMultiple, ) # The MultipleHiddenInput widget renders multiple values as hidden fields. 
class SongFormHidden(Form): name = CharField() composers = MultipleChoiceField( choices=[('J', 'John Lennon'), ('P', 'Paul McCartney')], widget=MultipleHiddenInput, ) f = SongFormHidden(MultiValueDict({'name': ['Yesterday'], 'composers': ['J', 'P']}), auto_id=False) self.assertHTMLEqual( f.as_ul(), """<li>Name: <input type="text" name="name" value="Yesterday" required> <input type="hidden" name="composers" value="J"> <input type="hidden" name="composers" value="P"></li>""" ) # When using CheckboxSelectMultiple, the framework expects a list of input and # returns a list of input. f = SongForm({'name': 'Yesterday'}, auto_id=False) self.assertEqual(f.errors['composers'], ['This field is required.']) f = SongForm({'name': 'Yesterday', 'composers': ['J']}, auto_id=False) self.assertEqual(f.errors, {}) self.assertEqual(f.cleaned_data['composers'], ['J']) self.assertEqual(f.cleaned_data['name'], 'Yesterday') f = SongForm({'name': 'Yesterday', 'composers': ['J', 'P']}, auto_id=False) self.assertEqual(f.errors, {}) self.assertEqual(f.cleaned_data['composers'], ['J', 'P']) self.assertEqual(f.cleaned_data['name'], 'Yesterday') # MultipleHiddenInput uses ducktyping so that # MultiValueDictLike.getlist() is called. f = SongForm(MultiValueDictLike({'name': 'Yesterday', 'composers': 'J'})) self.assertEqual(f.errors, {}) self.assertEqual(f.cleaned_data['composers'], ['J']) def test_escaping(self): # Validation errors are HTML-escaped when output as HTML. class EscapingForm(Form): special_name = CharField(label="<em>Special</em> Field") special_safe_name = CharField(label=mark_safe("<em>Special</em> Field")) def clean_special_name(self): raise ValidationError("Something's wrong with '%s'" % self.cleaned_data['special_name']) def clean_special_safe_name(self): raise ValidationError( mark_safe("'<b>%s</b>' is a safe string" % self.cleaned_data['special_safe_name']) ) f = EscapingForm({ 'special_name': "Nothing to escape", 'special_safe_name': "Nothing to escape", }, auto_id=False) self.assertHTMLEqual( f.as_table(), """<tr><th>&lt;em&gt;Special&lt;/em&gt; Field:</th><td> <ul class="errorlist"><li>Something&#x27;s wrong with &#x27;Nothing to escape&#x27;</li></ul> <input type="text" name="special_name" value="Nothing to escape" required></td></tr> <tr><th><em>Special</em> Field:</th><td> <ul class="errorlist"><li>'<b>Nothing to escape</b>' is a safe string</li></ul> <input type="text" name="special_safe_name" value="Nothing to escape" required></td></tr>""" ) f = EscapingForm({ 'special_name': "Should escape < & > and <script>alert('xss')</script>", 'special_safe_name': "<i>Do not escape</i>" }, auto_id=False) self.assertHTMLEqual( f.as_table(), """<tr><th>&lt;em&gt;Special&lt;/em&gt; Field:</th><td> <ul class="errorlist"><li>Something&#x27;s wrong with &#x27;Should escape &lt; &amp; &gt; and &lt;script&gt;alert(&#x27;xss&#x27;)&lt;/script&gt;&#x27;</li></ul> <input type="text" name="special_name" value="Should escape &lt; &amp; &gt; and &lt;script&gt;alert(&#x27;xss&#x27;)&lt;/script&gt;" required></td></tr> <tr><th><em>Special</em> Field:</th><td> <ul class="errorlist"><li>'<b><i>Do not escape</i></b>' is a safe string</li></ul> <input type="text" name="special_safe_name" value="&lt;i&gt;Do not escape&lt;/i&gt;" required></td></tr>""" ) def test_validating_multiple_fields(self): # There are a couple of ways to do multiple-field validation. If you want the # validation message to be associated with a particular field, implement the # clean_XXX() method on the Form, where XXX is the field name. 
As in # Field.clean(), the clean_XXX() method should return the cleaned value. In the # clean_XXX() method, you have access to self.cleaned_data, which is a dictionary # of all the data that has been cleaned *so far*, in order by the fields, # including the current field (e.g., the field XXX if you're in clean_XXX()). class UserRegistration(Form): username = CharField(max_length=10) password1 = CharField(widget=PasswordInput) password2 = CharField(widget=PasswordInput) def clean_password2(self): if (self.cleaned_data.get('password1') and self.cleaned_data.get('password2') and self.cleaned_data['password1'] != self.cleaned_data['password2']): raise ValidationError('Please make sure your passwords match.') return self.cleaned_data['password2'] f = UserRegistration(auto_id=False) self.assertEqual(f.errors, {}) f = UserRegistration({}, auto_id=False) self.assertEqual(f.errors['username'], ['This field is required.']) self.assertEqual(f.errors['password1'], ['This field is required.']) self.assertEqual(f.errors['password2'], ['This field is required.']) f = UserRegistration({'username': 'adrian', 'password1': 'foo', 'password2': 'bar'}, auto_id=False) self.assertEqual(f.errors['password2'], ['Please make sure your passwords match.']) f = UserRegistration({'username': 'adrian', 'password1': 'foo', 'password2': 'foo'}, auto_id=False) self.assertEqual(f.errors, {}) self.assertEqual(f.cleaned_data['username'], 'adrian') self.assertEqual(f.cleaned_data['password1'], 'foo') self.assertEqual(f.cleaned_data['password2'], 'foo') # Another way of doing multiple-field validation is by implementing the # Form's clean() method. Usually ValidationError raised by that method # will not be associated with a particular field and will have a # special-case association with the field named '__all__'. It's # possible to associate the errors to particular field with the # Form.add_error() method or by passing a dictionary that maps each # field to one or more errors. # # Note that in Form.clean(), you have access to self.cleaned_data, a # dictionary of all the fields/values that have *not* raised a # ValidationError. Also note Form.clean() is required to return a # dictionary of all clean data. class UserRegistration(Form): username = CharField(max_length=10) password1 = CharField(widget=PasswordInput) password2 = CharField(widget=PasswordInput) def clean(self): # Test raising a ValidationError as NON_FIELD_ERRORS. if (self.cleaned_data.get('password1') and self.cleaned_data.get('password2') and self.cleaned_data['password1'] != self.cleaned_data['password2']): raise ValidationError('Please make sure your passwords match.') # Test raising ValidationError that targets multiple fields. errors = {} if self.cleaned_data.get('password1') == 'FORBIDDEN_VALUE': errors['password1'] = 'Forbidden value.' 
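                # (Editorial note: ValidationError accepts a dict mapping each
                # field name to a single message, a list of messages, or
                # ValidationError instances; the two branches here use the
                # first two spellings interchangeably.)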
if self.cleaned_data.get('password2') == 'FORBIDDEN_VALUE': errors['password2'] = ['Forbidden value.'] if errors: raise ValidationError(errors) # Test Form.add_error() if self.cleaned_data.get('password1') == 'FORBIDDEN_VALUE2': self.add_error(None, 'Non-field error 1.') self.add_error('password1', 'Forbidden value 2.') if self.cleaned_data.get('password2') == 'FORBIDDEN_VALUE2': self.add_error('password2', 'Forbidden value 2.') raise ValidationError('Non-field error 2.') return self.cleaned_data f = UserRegistration(auto_id=False) self.assertEqual(f.errors, {}) f = UserRegistration({}, auto_id=False) self.assertHTMLEqual( f.as_table(), """<tr><th>Username:</th><td> <ul class="errorlist"><li>This field is required.</li></ul> <input type="text" name="username" maxlength="10" required></td></tr> <tr><th>Password1:</th><td><ul class="errorlist"><li>This field is required.</li></ul> <input type="password" name="password1" required></td></tr> <tr><th>Password2:</th><td><ul class="errorlist"><li>This field is required.</li></ul> <input type="password" name="password2" required></td></tr>""" ) self.assertEqual(f.errors['username'], ['This field is required.']) self.assertEqual(f.errors['password1'], ['This field is required.']) self.assertEqual(f.errors['password2'], ['This field is required.']) f = UserRegistration({'username': 'adrian', 'password1': 'foo', 'password2': 'bar'}, auto_id=False) self.assertEqual(f.errors['__all__'], ['Please make sure your passwords match.']) self.assertHTMLEqual( f.as_table(), """<tr><td colspan="2"> <ul class="errorlist nonfield"><li>Please make sure your passwords match.</li></ul></td></tr> <tr><th>Username:</th><td><input type="text" name="username" value="adrian" maxlength="10" required></td></tr> <tr><th>Password1:</th><td><input type="password" name="password1" required></td></tr> <tr><th>Password2:</th><td><input type="password" name="password2" required></td></tr>""" ) self.assertHTMLEqual( f.as_ul(), """<li><ul class="errorlist nonfield"> <li>Please make sure your passwords match.</li></ul></li> <li>Username: <input type="text" name="username" value="adrian" maxlength="10" required></li> <li>Password1: <input type="password" name="password1" required></li> <li>Password2: <input type="password" name="password2" required></li>""" ) f = UserRegistration({'username': 'adrian', 'password1': 'foo', 'password2': 'foo'}, auto_id=False) self.assertEqual(f.errors, {}) self.assertEqual(f.cleaned_data['username'], 'adrian') self.assertEqual(f.cleaned_data['password1'], 'foo') self.assertEqual(f.cleaned_data['password2'], 'foo') f = UserRegistration({ 'username': 'adrian', 'password1': 'FORBIDDEN_VALUE', 'password2': 'FORBIDDEN_VALUE', }, auto_id=False) self.assertEqual(f.errors['password1'], ['Forbidden value.']) self.assertEqual(f.errors['password2'], ['Forbidden value.']) f = UserRegistration({ 'username': 'adrian', 'password1': 'FORBIDDEN_VALUE2', 'password2': 'FORBIDDEN_VALUE2', }, auto_id=False) self.assertEqual(f.errors['__all__'], ['Non-field error 1.', 'Non-field error 2.']) self.assertEqual(f.errors['password1'], ['Forbidden value 2.']) self.assertEqual(f.errors['password2'], ['Forbidden value 2.']) with self.assertRaisesMessage(ValueError, "has no field named"): f.add_error('missing_field', 'Some error.') def test_update_error_dict(self): class CodeForm(Form): code = CharField(max_length=10) def clean(self): try: raise ValidationError({'code': [ValidationError('Code error 1.')]}) except ValidationError as e: self._errors = e.update_error_dict(self._errors) 
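                # (Editorial note: each update_error_dict() call below merges
                # the new errors into the dict accumulated so far, so repeated
                # 'code' errors pile up rather than overwrite each other.)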
try: raise ValidationError({'code': [ValidationError('Code error 2.')]}) except ValidationError as e: self._errors = e.update_error_dict(self._errors) try: raise ValidationError({'code': forms.ErrorList(['Code error 3.'])}) except ValidationError as e: self._errors = e.update_error_dict(self._errors) try: raise ValidationError('Non-field error 1.') except ValidationError as e: self._errors = e.update_error_dict(self._errors) try: raise ValidationError([ValidationError('Non-field error 2.')]) except ValidationError as e: self._errors = e.update_error_dict(self._errors) # The newly added list of errors is an instance of ErrorList. for field, error_list in self._errors.items(): if not isinstance(error_list, self.error_class): self._errors[field] = self.error_class(error_list) form = CodeForm({'code': 'hello'}) # Trigger validation. self.assertFalse(form.is_valid()) # update_error_dict didn't lose track of the ErrorDict type. self.assertIsInstance(form._errors, forms.ErrorDict) self.assertEqual(dict(form.errors), { 'code': ['Code error 1.', 'Code error 2.', 'Code error 3.'], NON_FIELD_ERRORS: ['Non-field error 1.', 'Non-field error 2.'], }) def test_has_error(self): class UserRegistration(Form): username = CharField(max_length=10) password1 = CharField(widget=PasswordInput, min_length=5) password2 = CharField(widget=PasswordInput) def clean(self): if (self.cleaned_data.get('password1') and self.cleaned_data.get('password2') and self.cleaned_data['password1'] != self.cleaned_data['password2']): raise ValidationError( 'Please make sure your passwords match.', code='password_mismatch', ) f = UserRegistration(data={}) self.assertTrue(f.has_error('password1')) self.assertTrue(f.has_error('password1', 'required')) self.assertFalse(f.has_error('password1', 'anything')) f = UserRegistration(data={'password1': 'Hi', 'password2': 'Hi'}) self.assertTrue(f.has_error('password1')) self.assertTrue(f.has_error('password1', 'min_length')) self.assertFalse(f.has_error('password1', 'anything')) self.assertFalse(f.has_error('password2')) self.assertFalse(f.has_error('password2', 'anything')) f = UserRegistration(data={'password1': 'Bonjour', 'password2': 'Hello'}) self.assertFalse(f.has_error('password1')) self.assertFalse(f.has_error('password1', 'required')) self.assertTrue(f.has_error(NON_FIELD_ERRORS)) self.assertTrue(f.has_error(NON_FIELD_ERRORS, 'password_mismatch')) self.assertFalse(f.has_error(NON_FIELD_ERRORS, 'anything')) def test_dynamic_construction(self): # It's possible to construct a Form dynamically by adding to the self.fields # dictionary in __init__(). Don't forget to call Form.__init__() within the # subclass' __init__(). class Person(Form): first_name = CharField() last_name = CharField() def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.fields['birthday'] = DateField() p = Person(auto_id=False) self.assertHTMLEqual( p.as_table(), """<tr><th>First name:</th><td><input type="text" name="first_name" required></td></tr> <tr><th>Last name:</th><td><input type="text" name="last_name" required></td></tr> <tr><th>Birthday:</th><td><input type="text" name="birthday" required></td></tr>""" ) # Instances of a dynamic Form do not persist fields from one Form instance to # the next. 
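        # (Editorial aside, not part of the original test: the field added in
        # __init__() above lives on the instance's self.fields; the
        # class-level base_fields are untouched, which is what makes
        # per-instance changes safe.)
        self.assertIn('birthday', p.fields)
        self.assertNotIn('birthday', Person.base_fields)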
class MyForm(Form): def __init__(self, data=None, auto_id=False, field_list=[]): Form.__init__(self, data, auto_id=auto_id) for field in field_list: self.fields[field[0]] = field[1] field_list = [('field1', CharField()), ('field2', CharField())] my_form = MyForm(field_list=field_list) self.assertHTMLEqual( my_form.as_table(), """<tr><th>Field1:</th><td><input type="text" name="field1" required></td></tr> <tr><th>Field2:</th><td><input type="text" name="field2" required></td></tr>""" ) field_list = [('field3', CharField()), ('field4', CharField())] my_form = MyForm(field_list=field_list) self.assertHTMLEqual( my_form.as_table(), """<tr><th>Field3:</th><td><input type="text" name="field3" required></td></tr> <tr><th>Field4:</th><td><input type="text" name="field4" required></td></tr>""" ) class MyForm(Form): default_field_1 = CharField() default_field_2 = CharField() def __init__(self, data=None, auto_id=False, field_list=[]): Form.__init__(self, data, auto_id=auto_id) for field in field_list: self.fields[field[0]] = field[1] field_list = [('field1', CharField()), ('field2', CharField())] my_form = MyForm(field_list=field_list) self.assertHTMLEqual( my_form.as_table(), """<tr><th>Default field 1:</th><td><input type="text" name="default_field_1" required></td></tr> <tr><th>Default field 2:</th><td><input type="text" name="default_field_2" required></td></tr> <tr><th>Field1:</th><td><input type="text" name="field1" required></td></tr> <tr><th>Field2:</th><td><input type="text" name="field2" required></td></tr>""" ) field_list = [('field3', CharField()), ('field4', CharField())] my_form = MyForm(field_list=field_list) self.assertHTMLEqual( my_form.as_table(), """<tr><th>Default field 1:</th><td><input type="text" name="default_field_1" required></td></tr> <tr><th>Default field 2:</th><td><input type="text" name="default_field_2" required></td></tr> <tr><th>Field3:</th><td><input type="text" name="field3" required></td></tr> <tr><th>Field4:</th><td><input type="text" name="field4" required></td></tr>""" ) # Similarly, changes to field attributes do not persist from one Form instance # to the next. 
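        # (Editorial aside, not part of the original test: the reason is the
        # same as above -- each instance deepcopies the class-level fields,
        # so a fresh MyForm() is back to its defaults.)
        self.assertEqual(list(MyForm().fields), ['default_field_1', 'default_field_2'])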
        class Person(Form):
            first_name = CharField(required=False)
            last_name = CharField(required=False)

            def __init__(self, names_required=False, *args, **kwargs):
                super().__init__(*args, **kwargs)

                if names_required:
                    self.fields['first_name'].required = True
                    self.fields['first_name'].widget.attrs['class'] = 'required'
                    self.fields['last_name'].required = True
                    self.fields['last_name'].widget.attrs['class'] = 'required'

        f = Person(names_required=False)
        self.assertEqual(f['first_name'].field.required, f['last_name'].field.required, (False, False))
        self.assertEqual(f['first_name'].field.widget.attrs, f['last_name'].field.widget.attrs, ({}, {}))
        f = Person(names_required=True)
        self.assertEqual(f['first_name'].field.required, f['last_name'].field.required, (True, True))
        self.assertEqual(
            f['first_name'].field.widget.attrs,
            f['last_name'].field.widget.attrs,
            ({'class': 'required'}, {'class': 'required'})
        )
        f = Person(names_required=False)
        self.assertEqual(f['first_name'].field.required, f['last_name'].field.required, (False, False))
        self.assertEqual(f['first_name'].field.widget.attrs, f['last_name'].field.widget.attrs, ({}, {}))

        class Person(Form):
            first_name = CharField(max_length=30)
            last_name = CharField(max_length=30)

            def __init__(self, name_max_length=None, *args, **kwargs):
                super().__init__(*args, **kwargs)

                if name_max_length:
                    self.fields['first_name'].max_length = name_max_length
                    self.fields['last_name'].max_length = name_max_length

        f = Person(name_max_length=None)
        self.assertEqual(f['first_name'].field.max_length, f['last_name'].field.max_length, (30, 30))
        f = Person(name_max_length=20)
        self.assertEqual(f['first_name'].field.max_length, f['last_name'].field.max_length, (20, 20))
        f = Person(name_max_length=None)
        self.assertEqual(f['first_name'].field.max_length, f['last_name'].field.max_length, (30, 30))

        # Similarly, choices do not persist from one Form instance to the next.
        # Refs #15127.
        class Person(Form):
            first_name = CharField(required=False)
            last_name = CharField(required=False)
            gender = ChoiceField(choices=(('f', 'Female'), ('m', 'Male')))

            def __init__(self, allow_unspec_gender=False, *args, **kwargs):
                super().__init__(*args, **kwargs)

                if allow_unspec_gender:
                    self.fields['gender'].choices += (('u', 'Unspecified'),)

        f = Person()
        self.assertEqual(f['gender'].field.choices, [('f', 'Female'), ('m', 'Male')])
        f = Person(allow_unspec_gender=True)
        self.assertEqual(f['gender'].field.choices, [('f', 'Female'), ('m', 'Male'), ('u', 'Unspecified')])
        f = Person()
        self.assertEqual(f['gender'].field.choices, [('f', 'Female'), ('m', 'Male')])

    def test_validators_independence(self):
        """
        The list of form field validators can be modified without polluting
        other forms.
        """
        class MyForm(Form):
            myfield = CharField(max_length=25)

        f1 = MyForm()
        f2 = MyForm()

        f1.fields['myfield'].validators[0] = MaxValueValidator(12)
        self.assertNotEqual(f1.fields['myfield'].validators[0], f2.fields['myfield'].validators[0])

    def test_hidden_widget(self):
        # HiddenInput widgets are displayed differently in the as_table(), as_ul()
        # and as_p() output of a Form -- their verbose names are not displayed, and a
        # separate row is not displayed. They're displayed in the last row of the
        # form, directly after that row's form element.
class Person(Form): first_name = CharField() last_name = CharField() hidden_text = CharField(widget=HiddenInput) birthday = DateField() p = Person(auto_id=False) self.assertHTMLEqual( p.as_table(), """<tr><th>First name:</th><td><input type="text" name="first_name" required></td></tr> <tr><th>Last name:</th><td><input type="text" name="last_name" required></td></tr> <tr><th>Birthday:</th> <td><input type="text" name="birthday" required><input type="hidden" name="hidden_text"></td></tr>""" ) self.assertHTMLEqual( p.as_ul(), """<li>First name: <input type="text" name="first_name" required></li> <li>Last name: <input type="text" name="last_name" required></li> <li>Birthday: <input type="text" name="birthday" required><input type="hidden" name="hidden_text"></li>""" ) self.assertHTMLEqual( p.as_p(), """<p>First name: <input type="text" name="first_name" required></p> <p>Last name: <input type="text" name="last_name" required></p> <p>Birthday: <input type="text" name="birthday" required><input type="hidden" name="hidden_text"></p>""" ) # With auto_id set, a HiddenInput still gets an ID, but it doesn't get a label. p = Person(auto_id='id_%s') self.assertHTMLEqual( p.as_table(), """<tr><th><label for="id_first_name">First name:</label></th><td> <input type="text" name="first_name" id="id_first_name" required></td></tr> <tr><th><label for="id_last_name">Last name:</label></th><td> <input type="text" name="last_name" id="id_last_name" required></td></tr> <tr><th><label for="id_birthday">Birthday:</label></th><td> <input type="text" name="birthday" id="id_birthday" required> <input type="hidden" name="hidden_text" id="id_hidden_text"></td></tr>""" ) self.assertHTMLEqual( p.as_ul(), """<li><label for="id_first_name">First name:</label> <input type="text" name="first_name" id="id_first_name" required></li> <li><label for="id_last_name">Last name:</label> <input type="text" name="last_name" id="id_last_name" required></li> <li><label for="id_birthday">Birthday:</label> <input type="text" name="birthday" id="id_birthday" required> <input type="hidden" name="hidden_text" id="id_hidden_text"></li>""" ) self.assertHTMLEqual( p.as_p(), """<p><label for="id_first_name">First name:</label> <input type="text" name="first_name" id="id_first_name" required></p> <p><label for="id_last_name">Last name:</label> <input type="text" name="last_name" id="id_last_name" required></p> <p><label for="id_birthday">Birthday:</label> <input type="text" name="birthday" id="id_birthday" required> <input type="hidden" name="hidden_text" id="id_hidden_text"></p>""" ) # If a field with a HiddenInput has errors, the as_table() and as_ul() output # will include the error message(s) with the text "(Hidden field [fieldname]) " # prepended. This message is displayed at the top of the output, regardless of # its field's order in the form. 
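        # (Editorial aside, not part of the original test: the hidden/visible
        # split used in this rendering is also exposed directly via
        # Form.hidden_fields() and Form.visible_fields().)
        self.assertEqual([bf.name for bf in p.hidden_fields()], ['hidden_text'])
        self.assertEqual([bf.name for bf in p.visible_fields()], ['first_name', 'last_name', 'birthday'])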
p = Person({'first_name': 'John', 'last_name': 'Lennon', 'birthday': '1940-10-9'}, auto_id=False) self.assertHTMLEqual( p.as_table(), """<tr><td colspan="2"> <ul class="errorlist nonfield"><li>(Hidden field hidden_text) This field is required.</li></ul></td></tr> <tr><th>First name:</th><td><input type="text" name="first_name" value="John" required></td></tr> <tr><th>Last name:</th><td><input type="text" name="last_name" value="Lennon" required></td></tr> <tr><th>Birthday:</th><td><input type="text" name="birthday" value="1940-10-9" required> <input type="hidden" name="hidden_text"></td></tr>""" ) self.assertHTMLEqual( p.as_ul(), """<li><ul class="errorlist nonfield"><li>(Hidden field hidden_text) This field is required.</li></ul></li> <li>First name: <input type="text" name="first_name" value="John" required></li> <li>Last name: <input type="text" name="last_name" value="Lennon" required></li> <li>Birthday: <input type="text" name="birthday" value="1940-10-9" required> <input type="hidden" name="hidden_text"></li>""" ) self.assertHTMLEqual( p.as_p(), """<ul class="errorlist nonfield"><li>(Hidden field hidden_text) This field is required.</li></ul> <p>First name: <input type="text" name="first_name" value="John" required></p> <p>Last name: <input type="text" name="last_name" value="Lennon" required></p> <p>Birthday: <input type="text" name="birthday" value="1940-10-9" required> <input type="hidden" name="hidden_text"></p>""" ) # A corner case: It's possible for a form to have only HiddenInputs. class TestForm(Form): foo = CharField(widget=HiddenInput) bar = CharField(widget=HiddenInput) p = TestForm(auto_id=False) self.assertHTMLEqual(p.as_table(), '<input type="hidden" name="foo"><input type="hidden" name="bar">') self.assertHTMLEqual(p.as_ul(), '<input type="hidden" name="foo"><input type="hidden" name="bar">') self.assertHTMLEqual(p.as_p(), '<input type="hidden" name="foo"><input type="hidden" name="bar">') def test_field_order(self): # A Form's fields are displayed in the same order in which they were defined. 
class TestForm(Form): field1 = CharField() field2 = CharField() field3 = CharField() field4 = CharField() field5 = CharField() field6 = CharField() field7 = CharField() field8 = CharField() field9 = CharField() field10 = CharField() field11 = CharField() field12 = CharField() field13 = CharField() field14 = CharField() p = TestForm(auto_id=False) self.assertHTMLEqual(p.as_table(), """<tr><th>Field1:</th><td><input type="text" name="field1" required></td></tr> <tr><th>Field2:</th><td><input type="text" name="field2" required></td></tr> <tr><th>Field3:</th><td><input type="text" name="field3" required></td></tr> <tr><th>Field4:</th><td><input type="text" name="field4" required></td></tr> <tr><th>Field5:</th><td><input type="text" name="field5" required></td></tr> <tr><th>Field6:</th><td><input type="text" name="field6" required></td></tr> <tr><th>Field7:</th><td><input type="text" name="field7" required></td></tr> <tr><th>Field8:</th><td><input type="text" name="field8" required></td></tr> <tr><th>Field9:</th><td><input type="text" name="field9" required></td></tr> <tr><th>Field10:</th><td><input type="text" name="field10" required></td></tr> <tr><th>Field11:</th><td><input type="text" name="field11" required></td></tr> <tr><th>Field12:</th><td><input type="text" name="field12" required></td></tr> <tr><th>Field13:</th><td><input type="text" name="field13" required></td></tr> <tr><th>Field14:</th><td><input type="text" name="field14" required></td></tr>""") def test_explicit_field_order(self): class TestFormParent(Form): field1 = CharField() field2 = CharField() field4 = CharField() field5 = CharField() field6 = CharField() field_order = ['field6', 'field5', 'field4', 'field2', 'field1'] class TestForm(TestFormParent): field3 = CharField() field_order = ['field2', 'field4', 'field3', 'field5', 'field6'] class TestFormRemove(TestForm): field1 = None class TestFormMissing(TestForm): field_order = ['field2', 'field4', 'field3', 'field5', 'field6', 'field1'] field1 = None class TestFormInit(TestFormParent): field3 = CharField() field_order = None def __init__(self, **kwargs): super().__init__(**kwargs) self.order_fields(field_order=TestForm.field_order) p = TestFormParent() self.assertEqual(list(p.fields), TestFormParent.field_order) p = TestFormRemove() self.assertEqual(list(p.fields), TestForm.field_order) p = TestFormMissing() self.assertEqual(list(p.fields), TestForm.field_order) p = TestForm() self.assertEqual(list(p.fields), TestFormMissing.field_order) p = TestFormInit() order = [*TestForm.field_order, 'field1'] self.assertEqual(list(p.fields), order) TestForm.field_order = ['unknown'] p = TestForm() self.assertEqual(list(p.fields), ['field1', 'field2', 'field4', 'field5', 'field6', 'field3']) def test_form_html_attributes(self): # Some Field classes have an effect on the HTML attributes of their associated # Widget. If you set max_length in a CharField and its associated widget is # either a TextInput or PasswordInput, then the widget's rendered HTML will # include the "maxlength" attribute. 
class UserRegistration(Form): username = CharField(max_length=10) # uses TextInput by default password = CharField(max_length=10, widget=PasswordInput) realname = CharField(max_length=10, widget=TextInput) # redundantly define widget, just to test address = CharField() # no max_length defined here p = UserRegistration(auto_id=False) self.assertHTMLEqual( p.as_ul(), """<li>Username: <input type="text" name="username" maxlength="10" required></li> <li>Password: <input type="password" name="password" maxlength="10" required></li> <li>Realname: <input type="text" name="realname" maxlength="10" required></li> <li>Address: <input type="text" name="address" required></li>""" ) # If you specify a custom "attrs" that includes the "maxlength" attribute, # the Field's max_length attribute will override whatever "maxlength" you specify # in "attrs". class UserRegistration(Form): username = CharField(max_length=10, widget=TextInput(attrs={'maxlength': 20})) password = CharField(max_length=10, widget=PasswordInput) p = UserRegistration(auto_id=False) self.assertHTMLEqual( p.as_ul(), """<li>Username: <input type="text" name="username" maxlength="10" required></li> <li>Password: <input type="password" name="password" maxlength="10" required></li>""" ) def test_specifying_labels(self): # You can specify the label for a field by using the 'label' argument to a Field # class. If you don't specify 'label', Django will use the field name with # underscores converted to spaces, and the initial letter capitalized. class UserRegistration(Form): username = CharField(max_length=10, label='Your username') password1 = CharField(widget=PasswordInput) password2 = CharField(widget=PasswordInput, label='Contraseña (de nuevo)') p = UserRegistration(auto_id=False) self.assertHTMLEqual( p.as_ul(), """<li>Your username: <input type="text" name="username" maxlength="10" required></li> <li>Password1: <input type="password" name="password1" required></li> <li>Contraseña (de nuevo): <input type="password" name="password2" required></li>""" ) # Labels for as_* methods will only end in a colon if they don't end in other # punctuation already. class Questions(Form): q1 = CharField(label='The first question') q2 = CharField(label='What is your name?') q3 = CharField(label='The answer to life is:') q4 = CharField(label='Answer this question!') q5 = CharField(label='The last question. Period.') self.assertHTMLEqual( Questions(auto_id=False).as_p(), """<p>The first question: <input type="text" name="q1" required></p> <p>What is your name? <input type="text" name="q2" required></p> <p>The answer to life is: <input type="text" name="q3" required></p> <p>Answer this question! <input type="text" name="q4" required></p> <p>The last question. Period. <input type="text" name="q5" required></p>""" ) self.assertHTMLEqual( Questions().as_p(), """<p><label for="id_q1">The first question:</label> <input type="text" name="q1" id="id_q1" required></p> <p><label for="id_q2">What is your name?</label> <input type="text" name="q2" id="id_q2" required></p> <p><label for="id_q3">The answer to life is:</label> <input type="text" name="q3" id="id_q3" required></p> <p><label for="id_q4">Answer this question!</label> <input type="text" name="q4" id="id_q4" required></p> <p><label for="id_q5">The last question. Period.</label> <input type="text" name="q5" id="id_q5" required></p>""" ) # If a label is set to the empty string for a field, that field won't get a label. 
class UserRegistration(Form): username = CharField(max_length=10, label='') password = CharField(widget=PasswordInput) p = UserRegistration(auto_id=False) self.assertHTMLEqual(p.as_ul(), """<li> <input type="text" name="username" maxlength="10" required></li> <li>Password: <input type="password" name="password" required></li>""") p = UserRegistration(auto_id='id_%s') self.assertHTMLEqual( p.as_ul(), """<li> <input id="id_username" type="text" name="username" maxlength="10" required></li> <li><label for="id_password">Password:</label> <input type="password" name="password" id="id_password" required></li>""" ) # If label is None, Django will auto-create the label from the field name. This # is default behavior. class UserRegistration(Form): username = CharField(max_length=10, label=None) password = CharField(widget=PasswordInput) p = UserRegistration(auto_id=False) self.assertHTMLEqual( p.as_ul(), """<li>Username: <input type="text" name="username" maxlength="10" required></li> <li>Password: <input type="password" name="password" required></li>""" ) p = UserRegistration(auto_id='id_%s') self.assertHTMLEqual( p.as_ul(), """<li><label for="id_username">Username:</label> <input id="id_username" type="text" name="username" maxlength="10" required></li> <li><label for="id_password">Password:</label> <input type="password" name="password" id="id_password" required></li>""" ) def test_label_suffix(self): # You can specify the 'label_suffix' argument to a Form class to modify the # punctuation symbol used at the end of a label. By default, the colon (:) is # used, and is only appended to the label if the label doesn't already end with a # punctuation symbol: ., !, ? or :. If you specify a different suffix, it will # be appended regardless of the last character of the label. class FavoriteForm(Form): color = CharField(label='Favorite color?') animal = CharField(label='Favorite animal') answer = CharField(label='Secret answer', label_suffix=' =') f = FavoriteForm(auto_id=False) self.assertHTMLEqual(f.as_ul(), """<li>Favorite color? <input type="text" name="color" required></li> <li>Favorite animal: <input type="text" name="animal" required></li> <li>Secret answer = <input type="text" name="answer" required></li>""") f = FavoriteForm(auto_id=False, label_suffix='?') self.assertHTMLEqual(f.as_ul(), """<li>Favorite color? <input type="text" name="color" required></li> <li>Favorite animal? <input type="text" name="animal" required></li> <li>Secret answer = <input type="text" name="answer" required></li>""") f = FavoriteForm(auto_id=False, label_suffix='') self.assertHTMLEqual(f.as_ul(), """<li>Favorite color? <input type="text" name="color" required></li> <li>Favorite animal <input type="text" name="animal" required></li> <li>Secret answer = <input type="text" name="answer" required></li>""") f = FavoriteForm(auto_id=False, label_suffix='\u2192') self.assertHTMLEqual( f.as_ul(), '<li>Favorite color? <input type="text" name="color" required></li>\n' '<li>Favorite animal\u2192 <input type="text" name="animal" required></li>\n' '<li>Secret answer = <input type="text" name="answer" required></li>' ) def test_initial_data(self): # You can specify initial data for a field by using the 'initial' argument to a # Field class. This initial data is displayed when a Form is rendered with *no* # data. It is not displayed when a Form is rendered with any data (including an # empty dictionary). Also, the initial value is *not* used if data for a # particular required field isn't provided. 
        class UserRegistration(Form):
            username = CharField(max_length=10, initial='django')
            password = CharField(widget=PasswordInput)

        # Here, we're not submitting any data, so the initial value will be displayed.
        p = UserRegistration(auto_id=False)
        self.assertHTMLEqual(
            p.as_ul(),
            """<li>Username: <input type="text" name="username" value="django" maxlength="10" required></li>
<li>Password: <input type="password" name="password" required></li>"""
        )
        # Here, we're submitting data, so the initial value will *not* be displayed.
        p = UserRegistration({}, auto_id=False)
        self.assertHTMLEqual(
            p.as_ul(),
            """<li><ul class="errorlist"><li>This field is required.</li></ul>
Username: <input type="text" name="username" maxlength="10" required></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>
Password: <input type="password" name="password" required></li>"""
        )
        p = UserRegistration({'username': ''}, auto_id=False)
        self.assertHTMLEqual(
            p.as_ul(),
            """<li><ul class="errorlist"><li>This field is required.</li></ul>
Username: <input type="text" name="username" maxlength="10" required></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>
Password: <input type="password" name="password" required></li>"""
        )
        p = UserRegistration({'username': 'foo'}, auto_id=False)
        self.assertHTMLEqual(
            p.as_ul(),
            """<li>Username: <input type="text" name="username" value="foo" maxlength="10" required></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>
Password: <input type="password" name="password" required></li>"""
        )

        # An 'initial' value is *not* used as a fallback if data is not provided. In this
        # example, we don't provide a value for 'username', and the form raises a
        # validation error rather than using the initial value for 'username'.
        p = UserRegistration({'password': 'secret'})
        self.assertEqual(p.errors['username'], ['This field is required.'])
        self.assertFalse(p.is_valid())

    def test_dynamic_initial_data(self):
        # The previous technique dealt with "hard-coded" initial data, but it's also
        # possible to specify initial data after you've already created the Form class
        # (i.e., at runtime). Use the 'initial' parameter to the Form constructor. This
        # should be a dictionary containing initial values for one or more fields in the
        # form, keyed by field name.
        class UserRegistration(Form):
            username = CharField(max_length=10)
            password = CharField(widget=PasswordInput)

        # Here, we're not submitting any data, so the initial value will be displayed.
        p = UserRegistration(initial={'username': 'django'}, auto_id=False)
        self.assertHTMLEqual(
            p.as_ul(),
            """<li>Username: <input type="text" name="username" value="django" maxlength="10" required></li>
<li>Password: <input type="password" name="password" required></li>"""
        )
        p = UserRegistration(initial={'username': 'stephane'}, auto_id=False)
        self.assertHTMLEqual(
            p.as_ul(),
            """<li>Username: <input type="text" name="username" value="stephane" maxlength="10" required></li>
<li>Password: <input type="password" name="password" required></li>"""
        )

        # The 'initial' parameter is meaningless if you pass data.
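        # (Editorial aside, not part of the original test: for the unbound
        # form above, BoundField.value() reports the constructor-level
        # initial.)
        self.assertEqual(p['username'].value(), 'stephane')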
        p = UserRegistration({}, initial={'username': 'django'}, auto_id=False)
        self.assertHTMLEqual(
            p.as_ul(),
            """<li><ul class="errorlist"><li>This field is required.</li></ul>
Username: <input type="text" name="username" maxlength="10" required></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>
Password: <input type="password" name="password" required></li>"""
        )
        p = UserRegistration({'username': ''}, initial={'username': 'django'}, auto_id=False)
        self.assertHTMLEqual(
            p.as_ul(),
            """<li><ul class="errorlist"><li>This field is required.</li></ul>
Username: <input type="text" name="username" maxlength="10" required></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>
Password: <input type="password" name="password" required></li>"""
        )
        p = UserRegistration({'username': 'foo'}, initial={'username': 'django'}, auto_id=False)
        self.assertHTMLEqual(
            p.as_ul(),
            """<li>Username: <input type="text" name="username" value="foo" maxlength="10" required></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>
Password: <input type="password" name="password" required></li>"""
        )

        # A dynamic 'initial' value is *not* used as a fallback if data is not provided.
        # In this example, we don't provide a value for 'username', and the form raises a
        # validation error rather than using the initial value for 'username'.
        p = UserRegistration({'password': 'secret'}, initial={'username': 'django'})
        self.assertEqual(p.errors['username'], ['This field is required.'])
        self.assertFalse(p.is_valid())

        # If a Form defines 'initial' *and* 'initial' is passed as a parameter to Form(),
        # then the latter will get precedence.
        class UserRegistration(Form):
            username = CharField(max_length=10, initial='django')
            password = CharField(widget=PasswordInput)

        p = UserRegistration(initial={'username': 'babik'}, auto_id=False)
        self.assertHTMLEqual(
            p.as_ul(),
            """<li>Username: <input type="text" name="username" value="babik" maxlength="10" required></li>
<li>Password: <input type="password" name="password" required></li>"""
        )

    def test_callable_initial_data(self):
        # The previous technique dealt with raw values as initial data, but it's also
        # possible to specify callable data.
        class UserRegistration(Form):
            username = CharField(max_length=10)
            password = CharField(widget=PasswordInput)
            options = MultipleChoiceField(choices=[('f', 'foo'), ('b', 'bar'), ('w', 'whiz')])

        # We need to define functions that get called later.
        def initial_django():
            return 'django'

        def initial_stephane():
            return 'stephane'

        def initial_options():
            return ['f', 'b']

        def initial_other_options():
            return ['b', 'w']

        # Here, we're not submitting any data, so the initial value will be displayed.
        p = UserRegistration(initial={'username': initial_django, 'options': initial_options}, auto_id=False)
        self.assertHTMLEqual(
            p.as_ul(),
            """<li>Username: <input type="text" name="username" value="django" maxlength="10" required></li>
<li>Password: <input type="password" name="password" required></li>
<li>Options: <select multiple name="options" required>
<option value="f" selected>foo</option>
<option value="b" selected>bar</option>
<option value="w">whiz</option>
</select></li>"""
        )

        # The 'initial' parameter is meaningless if you pass data.
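        # (Editorial aside, not part of the original test: for the unbound
        # form above, callable initials are evaluated -- value() reports the
        # called result, not the function.)
        self.assertEqual(p['username'].value(), 'django')
        self.assertEqual(p['options'].value(), ['f', 'b'])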
p = UserRegistration({}, initial={'username': initial_django, 'options': initial_options}, auto_id=False) self.assertHTMLEqual( p.as_ul(), """<li><ul class="errorlist"><li>This field is required.</li></ul> Username: <input type="text" name="username" maxlength="10" required></li> <li><ul class="errorlist"><li>This field is required.</li></ul> Password: <input type="password" name="password" required></li> <li><ul class="errorlist"><li>This field is required.</li></ul> Options: <select multiple name="options" required> <option value="f">foo</option> <option value="b">bar</option> <option value="w">whiz</option> </select></li>""" ) p = UserRegistration({'username': ''}, initial={'username': initial_django}, auto_id=False) self.assertHTMLEqual( p.as_ul(), """<li><ul class="errorlist"><li>This field is required.</li></ul> Username: <input type="text" name="username" maxlength="10" required></li> <li><ul class="errorlist"><li>This field is required.</li></ul> Password: <input type="password" name="password" required></li> <li><ul class="errorlist"><li>This field is required.</li></ul> Options: <select multiple name="options" required> <option value="f">foo</option> <option value="b">bar</option> <option value="w">whiz</option> </select></li>""" ) p = UserRegistration( {'username': 'foo', 'options': ['f', 'b']}, initial={'username': initial_django}, auto_id=False ) self.assertHTMLEqual( p.as_ul(), """<li>Username: <input type="text" name="username" value="foo" maxlength="10" required></li> <li><ul class="errorlist"><li>This field is required.</li></ul> Password: <input type="password" name="password" required></li> <li>Options: <select multiple name="options" required> <option value="f" selected>foo</option> <option value="b" selected>bar</option> <option value="w">whiz</option> </select></li>""" ) # A callable 'initial' value is *not* used as a fallback if data is not provided. # In this example, we don't provide a value for 'username', and the form raises a # validation error rather than using the initial value for 'username'. p = UserRegistration({'password': 'secret'}, initial={'username': initial_django, 'options': initial_options}) self.assertEqual(p.errors['username'], ['This field is required.']) self.assertFalse(p.is_valid()) # If a Form defines 'initial' *and* 'initial' is passed as a parameter to Form(), # then the latter will get precedence. 
class UserRegistration(Form): username = CharField(max_length=10, initial=initial_django) password = CharField(widget=PasswordInput) options = MultipleChoiceField( choices=[('f', 'foo'), ('b', 'bar'), ('w', 'whiz')], initial=initial_other_options, ) p = UserRegistration(auto_id=False) self.assertHTMLEqual( p.as_ul(), """<li>Username: <input type="text" name="username" value="django" maxlength="10" required></li> <li>Password: <input type="password" name="password" required></li> <li>Options: <select multiple name="options" required> <option value="f">foo</option> <option value="b" selected>bar</option> <option value="w" selected>whiz</option> </select></li>""" ) p = UserRegistration(initial={'username': initial_stephane, 'options': initial_options}, auto_id=False) self.assertHTMLEqual( p.as_ul(), """<li>Username: <input type="text" name="username" value="stephane" maxlength="10" required></li> <li>Password: <input type="password" name="password" required></li> <li>Options: <select multiple name="options" required> <option value="f" selected>foo</option> <option value="b" selected>bar</option> <option value="w">whiz</option> </select></li>""" ) def test_get_initial_for_field(self): class PersonForm(Form): first_name = CharField(initial='John') last_name = CharField(initial='Doe') age = IntegerField() occupation = CharField(initial=lambda: 'Unknown') form = PersonForm(initial={'first_name': 'Jane'}) self.assertEqual(form.get_initial_for_field(form.fields['age'], 'age'), None) self.assertEqual(form.get_initial_for_field(form.fields['last_name'], 'last_name'), 'Doe') # Form.initial overrides Field.initial. self.assertEqual(form.get_initial_for_field(form.fields['first_name'], 'first_name'), 'Jane') # Callables are evaluated. self.assertEqual(form.get_initial_for_field(form.fields['occupation'], 'occupation'), 'Unknown') def test_changed_data(self): class Person(Form): first_name = CharField(initial='Hans') last_name = CharField(initial='Greatel') birthday = DateField(initial=datetime.date(1974, 8, 16)) p = Person(data={'first_name': 'Hans', 'last_name': 'Scrmbl', 'birthday': '1974-08-16'}) self.assertTrue(p.is_valid()) self.assertNotIn('first_name', p.changed_data) self.assertIn('last_name', p.changed_data) self.assertNotIn('birthday', p.changed_data) # A field raising ValidationError is always in changed_data class PedanticField(forms.Field): def to_python(self, value): raise ValidationError('Whatever') class Person2(Person): pedantic = PedanticField(initial='whatever', show_hidden_initial=True) p = Person2(data={ 'first_name': 'Hans', 'last_name': 'Scrmbl', 'birthday': '1974-08-16', 'initial-pedantic': 'whatever', }) self.assertFalse(p.is_valid()) self.assertIn('pedantic', p.changed_data) def test_boundfield_values(self): # It's possible to get to the value which would be used for rendering # the widget for a field by using the BoundField's value method. class UserRegistration(Form): username = CharField(max_length=10, initial='djangonaut') password = CharField(widget=PasswordInput) unbound = UserRegistration() bound = UserRegistration({'password': 'foo'}) self.assertIsNone(bound['username'].value()) self.assertEqual(unbound['username'].value(), 'djangonaut') self.assertEqual(bound['password'].value(), 'foo') self.assertIsNone(unbound['password'].value()) def test_boundfield_initial_called_once(self): """ Multiple calls to BoundField().value() in an unbound form should return the same result each time (#24391). 
""" class MyForm(Form): name = CharField(max_length=10, initial=uuid.uuid4) form = MyForm() name = form['name'] self.assertEqual(name.value(), name.value()) # BoundField is also cached self.assertIs(form['name'], name) def test_boundfield_value_disabled_callable_initial(self): class PersonForm(Form): name = CharField(initial=lambda: 'John Doe', disabled=True) # Without form data. form = PersonForm() self.assertEqual(form['name'].value(), 'John Doe') # With form data. As the field is disabled, the value should not be # affected by the form data. form = PersonForm({}) self.assertEqual(form['name'].value(), 'John Doe') def test_custom_boundfield(self): class CustomField(CharField): def get_bound_field(self, form, name): return (form, name) class SampleForm(Form): name = CustomField() f = SampleForm() self.assertEqual(f['name'], (f, 'name')) def test_initial_datetime_values(self): now = datetime.datetime.now() # Nix microseconds (since they should be ignored). #22502 now_no_ms = now.replace(microsecond=0) if now == now_no_ms: now = now.replace(microsecond=1) def delayed_now(): return now def delayed_now_time(): return now.time() class HiddenInputWithoutMicrosec(HiddenInput): supports_microseconds = False class TextInputWithoutMicrosec(TextInput): supports_microseconds = False class DateTimeForm(Form): auto_timestamp = DateTimeField(initial=delayed_now) auto_time_only = TimeField(initial=delayed_now_time) supports_microseconds = DateTimeField(initial=delayed_now, widget=TextInput) hi_default_microsec = DateTimeField(initial=delayed_now, widget=HiddenInput) hi_without_microsec = DateTimeField(initial=delayed_now, widget=HiddenInputWithoutMicrosec) ti_without_microsec = DateTimeField(initial=delayed_now, widget=TextInputWithoutMicrosec) unbound = DateTimeForm() self.assertEqual(unbound['auto_timestamp'].value(), now_no_ms) self.assertEqual(unbound['auto_time_only'].value(), now_no_ms.time()) self.assertEqual(unbound['supports_microseconds'].value(), now) self.assertEqual(unbound['hi_default_microsec'].value(), now) self.assertEqual(unbound['hi_without_microsec'].value(), now_no_ms) self.assertEqual(unbound['ti_without_microsec'].value(), now_no_ms) def test_datetime_clean_initial_callable_disabled(self): now = datetime.datetime(2006, 10, 25, 14, 30, 45, 123456) class DateTimeForm(forms.Form): dt = DateTimeField(initial=lambda: now, disabled=True) form = DateTimeForm({}) self.assertEqual(form.errors, {}) self.assertEqual(form.cleaned_data, {'dt': now}) def test_datetime_changed_data_callable_with_microseconds(self): class DateTimeForm(forms.Form): dt = DateTimeField(initial=lambda: datetime.datetime(2006, 10, 25, 14, 30, 45, 123456), disabled=True) form = DateTimeForm({'dt': '2006-10-25 14:30:45'}) self.assertEqual(form.changed_data, []) def test_help_text(self): # You can specify descriptive text for a field by using the 'help_text' argument) class UserRegistration(Form): username = CharField(max_length=10, help_text='e.g., [email protected]') password = CharField(widget=PasswordInput, help_text='Wählen Sie mit Bedacht.') p = UserRegistration(auto_id=False) self.assertHTMLEqual( p.as_ul(), """<li>Username: <input type="text" name="username" maxlength="10" required> <span class="helptext">e.g., [email protected]</span></li> <li>Password: <input type="password" name="password" required> <span class="helptext">Wählen Sie mit Bedacht.</span></li>""" ) self.assertHTMLEqual( p.as_p(), """<p>Username: <input type="text" name="username" maxlength="10" required> <span class="helptext">e.g., [email 
protected]</span></p> <p>Password: <input type="password" name="password" required> <span class="helptext">Wählen Sie mit Bedacht.</span></p>""" ) self.assertHTMLEqual( p.as_table(), """<tr><th>Username:</th><td><input type="text" name="username" maxlength="10" required><br> <span class="helptext">e.g., [email protected]</span></td></tr> <tr><th>Password:</th><td><input type="password" name="password" required><br> <span class="helptext">Wählen Sie mit Bedacht.</span></td></tr>""" ) # The help text is displayed whether or not data is provided for the form. p = UserRegistration({'username': 'foo'}, auto_id=False) self.assertHTMLEqual( p.as_ul(), """<li>Username: <input type="text" name="username" value="foo" maxlength="10" required> <span class="helptext">e.g., [email protected]</span></li> <li><ul class="errorlist"><li>This field is required.</li></ul> Password: <input type="password" name="password" required> <span class="helptext">Wählen Sie mit Bedacht.</span></li>""" ) # help_text is not displayed for hidden fields. It can be used for documentation # purposes, though. class UserRegistration(Form): username = CharField(max_length=10, help_text='e.g., [email protected]') password = CharField(widget=PasswordInput) next = CharField(widget=HiddenInput, initial='/', help_text='Redirect destination') p = UserRegistration(auto_id=False) self.assertHTMLEqual( p.as_ul(), """<li>Username: <input type="text" name="username" maxlength="10" required> <span class="helptext">e.g., [email protected]</span></li> <li>Password: <input type="password" name="password" required> <input type="hidden" name="next" value="/"></li>""" ) def test_subclassing_forms(self): # You can subclass a Form to add fields. The resulting form subclass will have # all of the fields of the parent Form, plus whichever fields you define in the # subclass. class Person(Form): first_name = CharField() last_name = CharField() birthday = DateField() class Musician(Person): instrument = CharField() p = Person(auto_id=False) self.assertHTMLEqual( p.as_ul(), """<li>First name: <input type="text" name="first_name" required></li> <li>Last name: <input type="text" name="last_name" required></li> <li>Birthday: <input type="text" name="birthday" required></li>""" ) m = Musician(auto_id=False) self.assertHTMLEqual( m.as_ul(), """<li>First name: <input type="text" name="first_name" required></li> <li>Last name: <input type="text" name="last_name" required></li> <li>Birthday: <input type="text" name="birthday" required></li> <li>Instrument: <input type="text" name="instrument" required></li>""" ) # Yes, you can subclass multiple forms. The fields are added in the order in # which the parent classes are listed. class Person(Form): first_name = CharField() last_name = CharField() birthday = DateField() class Instrument(Form): instrument = CharField() class Beatle(Person, Instrument): haircut_type = CharField() b = Beatle(auto_id=False) self.assertHTMLEqual(b.as_ul(), """<li>Instrument: <input type="text" name="instrument" required></li> <li>First name: <input type="text" name="first_name" required></li> <li>Last name: <input type="text" name="last_name" required></li> <li>Birthday: <input type="text" name="birthday" required></li> <li>Haircut type: <input type="text" name="haircut_type" required></li>""") def test_forms_with_prefixes(self): # Sometimes it's necessary to have multiple forms display on the same HTML page, # or multiple copies of the same form. We can accomplish this with form prefixes. 
# Pass the keyword argument 'prefix' to the Form constructor to use this feature. # This value will be prepended to each HTML form field name. One way to think # about this is "namespaces for HTML forms". Notice that in the data argument, # each field's key has the prefix, in this case 'person1', prepended to the # actual field name. class Person(Form): first_name = CharField() last_name = CharField() birthday = DateField() data = { 'person1-first_name': 'John', 'person1-last_name': 'Lennon', 'person1-birthday': '1940-10-9' } p = Person(data, prefix='person1') self.assertHTMLEqual( p.as_ul(), """<li><label for="id_person1-first_name">First name:</label> <input type="text" name="person1-first_name" value="John" id="id_person1-first_name" required></li> <li><label for="id_person1-last_name">Last name:</label> <input type="text" name="person1-last_name" value="Lennon" id="id_person1-last_name" required></li> <li><label for="id_person1-birthday">Birthday:</label> <input type="text" name="person1-birthday" value="1940-10-9" id="id_person1-birthday" required></li>""" ) self.assertHTMLEqual( str(p['first_name']), '<input type="text" name="person1-first_name" value="John" id="id_person1-first_name" required>' ) self.assertHTMLEqual( str(p['last_name']), '<input type="text" name="person1-last_name" value="Lennon" id="id_person1-last_name" required>' ) self.assertHTMLEqual( str(p['birthday']), '<input type="text" name="person1-birthday" value="1940-10-9" id="id_person1-birthday" required>' ) self.assertEqual(p.errors, {}) self.assertTrue(p.is_valid()) self.assertEqual(p.cleaned_data['first_name'], 'John') self.assertEqual(p.cleaned_data['last_name'], 'Lennon') self.assertEqual(p.cleaned_data['birthday'], datetime.date(1940, 10, 9)) # Let's try submitting some bad data to make sure form.errors and field.errors # work as expected. data = { 'person1-first_name': '', 'person1-last_name': '', 'person1-birthday': '' } p = Person(data, prefix='person1') self.assertEqual(p.errors['first_name'], ['This field is required.']) self.assertEqual(p.errors['last_name'], ['This field is required.']) self.assertEqual(p.errors['birthday'], ['This field is required.']) self.assertEqual(p['first_name'].errors, ['This field is required.']) # Accessing a nonexistent field. with self.assertRaises(KeyError): p['person1-first_name'].errors # In this example, the data doesn't have a prefix, but the form requires it, so # the form doesn't "see" the fields. data = { 'first_name': 'John', 'last_name': 'Lennon', 'birthday': '1940-10-9' } p = Person(data, prefix='person1') self.assertEqual(p.errors['first_name'], ['This field is required.']) self.assertEqual(p.errors['last_name'], ['This field is required.']) self.assertEqual(p.errors['birthday'], ['This field is required.']) # With prefixes, a single data dictionary can hold data for multiple instances # of the same form. 
data = { 'person1-first_name': 'John', 'person1-last_name': 'Lennon', 'person1-birthday': '1940-10-9', 'person2-first_name': 'Jim', 'person2-last_name': 'Morrison', 'person2-birthday': '1943-12-8' } p1 = Person(data, prefix='person1') self.assertTrue(p1.is_valid()) self.assertEqual(p1.cleaned_data['first_name'], 'John') self.assertEqual(p1.cleaned_data['last_name'], 'Lennon') self.assertEqual(p1.cleaned_data['birthday'], datetime.date(1940, 10, 9)) p2 = Person(data, prefix='person2') self.assertTrue(p2.is_valid()) self.assertEqual(p2.cleaned_data['first_name'], 'Jim') self.assertEqual(p2.cleaned_data['last_name'], 'Morrison') self.assertEqual(p2.cleaned_data['birthday'], datetime.date(1943, 12, 8)) # By default, forms append a hyphen between the prefix and the field name, but a # form can alter that behavior by implementing the add_prefix() method. This # method takes a field name and returns the prefixed field, according to # self.prefix. class Person(Form): first_name = CharField() last_name = CharField() birthday = DateField() def add_prefix(self, field_name): return '%s-prefix-%s' % (self.prefix, field_name) if self.prefix else field_name p = Person(prefix='foo') self.assertHTMLEqual( p.as_ul(), """<li><label for="id_foo-prefix-first_name">First name:</label> <input type="text" name="foo-prefix-first_name" id="id_foo-prefix-first_name" required></li> <li><label for="id_foo-prefix-last_name">Last name:</label> <input type="text" name="foo-prefix-last_name" id="id_foo-prefix-last_name" required></li> <li><label for="id_foo-prefix-birthday">Birthday:</label> <input type="text" name="foo-prefix-birthday" id="id_foo-prefix-birthday" required></li>""" ) data = { 'foo-prefix-first_name': 'John', 'foo-prefix-last_name': 'Lennon', 'foo-prefix-birthday': '1940-10-9' } p = Person(data, prefix='foo') self.assertTrue(p.is_valid()) self.assertEqual(p.cleaned_data['first_name'], 'John') self.assertEqual(p.cleaned_data['last_name'], 'Lennon') self.assertEqual(p.cleaned_data['birthday'], datetime.date(1940, 10, 9)) def test_class_prefix(self): # Prefix can be also specified at the class level. class Person(Form): first_name = CharField() prefix = 'foo' p = Person() self.assertEqual(p.prefix, 'foo') p = Person(prefix='bar') self.assertEqual(p.prefix, 'bar') def test_forms_with_null_boolean(self): # NullBooleanField is a bit of a special case because its presentation (widget) # is different than its data. This is handled transparently, though. 
class Person(Form): name = CharField() is_cool = NullBooleanField() p = Person({'name': 'Joe'}, auto_id=False) self.assertHTMLEqual(str(p['is_cool']), """<select name="is_cool"> <option value="unknown" selected>Unknown</option> <option value="true">Yes</option> <option value="false">No</option> </select>""") p = Person({'name': 'Joe', 'is_cool': '1'}, auto_id=False) self.assertHTMLEqual(str(p['is_cool']), """<select name="is_cool"> <option value="unknown" selected>Unknown</option> <option value="true">Yes</option> <option value="false">No</option> </select>""") p = Person({'name': 'Joe', 'is_cool': '2'}, auto_id=False) self.assertHTMLEqual(str(p['is_cool']), """<select name="is_cool"> <option value="unknown">Unknown</option> <option value="true" selected>Yes</option> <option value="false">No</option> </select>""") p = Person({'name': 'Joe', 'is_cool': '3'}, auto_id=False) self.assertHTMLEqual(str(p['is_cool']), """<select name="is_cool"> <option value="unknown">Unknown</option> <option value="true">Yes</option> <option value="false" selected>No</option> </select>""") p = Person({'name': 'Joe', 'is_cool': True}, auto_id=False) self.assertHTMLEqual(str(p['is_cool']), """<select name="is_cool"> <option value="unknown">Unknown</option> <option value="true" selected>Yes</option> <option value="false">No</option> </select>""") p = Person({'name': 'Joe', 'is_cool': False}, auto_id=False) self.assertHTMLEqual(str(p['is_cool']), """<select name="is_cool"> <option value="unknown">Unknown</option> <option value="true">Yes</option> <option value="false" selected>No</option> </select>""") p = Person({'name': 'Joe', 'is_cool': 'unknown'}, auto_id=False) self.assertHTMLEqual(str(p['is_cool']), """<select name="is_cool"> <option value="unknown" selected>Unknown</option> <option value="true">Yes</option> <option value="false">No</option> </select>""") p = Person({'name': 'Joe', 'is_cool': 'true'}, auto_id=False) self.assertHTMLEqual(str(p['is_cool']), """<select name="is_cool"> <option value="unknown">Unknown</option> <option value="true" selected>Yes</option> <option value="false">No</option> </select>""") p = Person({'name': 'Joe', 'is_cool': 'false'}, auto_id=False) self.assertHTMLEqual(str(p['is_cool']), """<select name="is_cool"> <option value="unknown">Unknown</option> <option value="true">Yes</option> <option value="false" selected>No</option> </select>""") def test_forms_with_file_fields(self): # FileFields are a special case because they take their data from the request.FILES, # not request.POST. class FileForm(Form): file1 = FileField() f = FileForm(auto_id=False) self.assertHTMLEqual( f.as_table(), '<tr><th>File1:</th><td><input type="file" name="file1" required></td></tr>', ) f = FileForm(data={}, files={}, auto_id=False) self.assertHTMLEqual( f.as_table(), '<tr><th>File1:</th><td>' '<ul class="errorlist"><li>This field is required.</li></ul>' '<input type="file" name="file1" required></td></tr>' ) f = FileForm(data={}, files={'file1': SimpleUploadedFile('name', b'')}, auto_id=False) self.assertHTMLEqual( f.as_table(), '<tr><th>File1:</th><td>' '<ul class="errorlist"><li>The submitted file is empty.</li></ul>' '<input type="file" name="file1" required></td></tr>' ) f = FileForm(data={}, files={'file1': 'something that is not a file'}, auto_id=False) self.assertHTMLEqual( f.as_table(), '<tr><th>File1:</th><td>' '<ul class="errorlist"><li>No file was submitted. 
Check the '
            'encoding type on the form.</li></ul>'
            '<input type="file" name="file1" required></td></tr>'
        )

        f = FileForm(data={}, files={'file1': SimpleUploadedFile('name', b'some content')}, auto_id=False)
        self.assertHTMLEqual(
            f.as_table(),
            '<tr><th>File1:</th><td><input type="file" name="file1" required></td></tr>',
        )
        self.assertTrue(f.is_valid())

        file1 = SimpleUploadedFile('我隻氣墊船裝滿晒鱔.txt', 'मेरी मँडराने वाली नाव सर्पमीनों से भरी ह'.encode())
        f = FileForm(data={}, files={'file1': file1}, auto_id=False)
        self.assertHTMLEqual(
            f.as_table(),
            '<tr><th>File1:</th><td><input type="file" name="file1" required></td></tr>',
        )

        # A required file field with initial data should not contain the
        # required HTML attribute. The file input is left blank by the user to
        # keep the existing, initial value.
        f = FileForm(initial={'file1': 'resume.txt'}, auto_id=False)
        self.assertHTMLEqual(
            f.as_table(),
            '<tr><th>File1:</th><td><input type="file" name="file1"></td></tr>',
        )

    def test_filefield_initial_callable(self):
        class FileForm(forms.Form):
            file1 = forms.FileField(initial=lambda: 'resume.txt')

        f = FileForm({})
        self.assertEqual(f.errors, {})
        self.assertEqual(f.cleaned_data['file1'], 'resume.txt')

    def test_basic_processing_in_view(self):
        class UserRegistration(Form):
            username = CharField(max_length=10)
            password1 = CharField(widget=PasswordInput)
            password2 = CharField(widget=PasswordInput)

            def clean(self):
                if (self.cleaned_data.get('password1') and self.cleaned_data.get('password2') and
                        self.cleaned_data['password1'] != self.cleaned_data['password2']):
                    raise ValidationError('Please make sure your passwords match.')
                return self.cleaned_data

        def my_function(method, post_data):
            if method == 'POST':
                form = UserRegistration(post_data, auto_id=False)
            else:
                form = UserRegistration(auto_id=False)
            if form.is_valid():
                return 'VALID: %r' % sorted(form.cleaned_data.items())
            t = Template(
                '<form method="post">\n'
                '<table>\n{{ form }}\n</table>\n<input type="submit" required>\n</form>'
            )
            return t.render(Context({'form': form}))

        # Case 1: GET (an empty form, with no errors).
        self.assertHTMLEqual(my_function('GET', {}), """<form method="post">
<table>
<tr><th>Username:</th><td><input type="text" name="username" maxlength="10" required></td></tr>
<tr><th>Password1:</th><td><input type="password" name="password1" required></td></tr>
<tr><th>Password2:</th><td><input type="password" name="password2" required></td></tr>
</table>
<input type="submit" required>
</form>""")

        # Case 2: POST with erroneous data (a redisplayed form, with errors).
        self.assertHTMLEqual(
            my_function('POST', {'username': 'this-is-a-long-username', 'password1': 'foo', 'password2': 'bar'}),
            """<form method="post">
<table>
<tr><td colspan="2"><ul class="errorlist nonfield"><li>Please make sure your passwords match.</li></ul></td></tr>
<tr><th>Username:</th><td><ul class="errorlist">
<li>Ensure this value has at most 10 characters (it has 23).</li></ul>
<input type="text" name="username" value="this-is-a-long-username" maxlength="10" required></td></tr>
<tr><th>Password1:</th><td><input type="password" name="password1" required></td></tr>
<tr><th>Password2:</th><td><input type="password" name="password2" required></td></tr>
</table>
<input type="submit" required>
</form>"""
        )

        # Case 3: POST with valid data (the success message).
        self.assertEqual(
            my_function('POST', {'username': 'adrian', 'password1': 'secret', 'password2': 'secret'}),
            "VALID: [('password1', 'secret'), ('password2', 'secret'), ('username', 'adrian')]"
        )

    def test_templates_with_forms(self):
        class UserRegistration(Form):
            username = CharField(max_length=10, help_text="Good luck picking a username that doesn't already exist.")
            password1 = CharField(widget=PasswordInput)
            password2 = CharField(widget=PasswordInput)

            def clean(self):
                if (self.cleaned_data.get('password1') and self.cleaned_data.get('password2') and
                        self.cleaned_data['password1'] != self.cleaned_data['password2']):
                    raise ValidationError('Please make sure your passwords match.')
                return self.cleaned_data

        # You have full flexibility in displaying form fields in a template. Just pass a
        # Form instance to the template, and use "dot" access to refer to individual
        # fields. Note, however, that this flexibility comes with the responsibility of
        # displaying all the errors, including any that might not be associated with a
        # particular field.
        t = Template('''<form>
{{ form.username.errors.as_ul }}<p><label>Your username: {{ form.username }}</label></p>
{{ form.password1.errors.as_ul }}<p><label>Password: {{ form.password1 }}</label></p>
{{ form.password2.errors.as_ul }}<p><label>Password (again): {{ form.password2 }}</label></p>
<input type="submit" required>
</form>''')
        self.assertHTMLEqual(t.render(Context({'form': UserRegistration(auto_id=False)})), """<form>
<p><label>Your username: <input type="text" name="username" maxlength="10" required></label></p>
<p><label>Password: <input type="password" name="password1" required></label></p>
<p><label>Password (again): <input type="password" name="password2" required></label></p>
<input type="submit" required>
</form>""")
        self.assertHTMLEqual(
            t.render(Context({'form': UserRegistration({'username': 'django'}, auto_id=False)})),
            """<form>
<p><label>Your username: <input type="text" name="username" value="django" maxlength="10" required></label></p>
<ul class="errorlist"><li>This field is required.</li></ul><p>
<label>Password: <input type="password" name="password1" required></label></p>
<ul class="errorlist"><li>This field is required.</li></ul>
<p><label>Password (again): <input type="password" name="password2" required></label></p>
<input type="submit" required>
</form>"""
        )

        # Use form.[field].label to output a field's label. You can specify the label for
        # a field by using the 'label' argument to a Field class. If you don't specify
        # 'label', Django will use the field name with underscores converted to spaces,
        # and the initial letter capitalized.
        t = Template('''<form>
<p><label>{{ form.username.label }}: {{ form.username }}</label></p>
<p><label>{{ form.password1.label }}: {{ form.password1 }}</label></p>
<p><label>{{ form.password2.label }}: {{ form.password2 }}</label></p>
<input type="submit" required>
</form>''')
        self.assertHTMLEqual(t.render(Context({'form': UserRegistration(auto_id=False)})), """<form>
<p><label>Username: <input type="text" name="username" maxlength="10" required></label></p>
<p><label>Password1: <input type="password" name="password1" required></label></p>
<p><label>Password2: <input type="password" name="password2" required></label></p>
<input type="submit" required>
</form>""")

        # Use form.[field].label_tag to output a field's label with a <label> tag
        # wrapped around it, but *only* if the given field has an "id" attribute.
        # Recall from above that passing the "auto_id" argument to a Form gives each
        # field an "id" attribute.
        t = Template('''<form>
<p>{{ form.username.label_tag }} {{ form.username }}</p>
<p>{{ form.password1.label_tag }} {{ form.password1 }}</p>
<p>{{ form.password2.label_tag }} {{ form.password2 }}</p>
<input type="submit" required>
</form>''')
        self.assertHTMLEqual(t.render(Context({'form': UserRegistration(auto_id=False)})), """<form>
<p>Username: <input type="text" name="username" maxlength="10" required></p>
<p>Password1: <input type="password" name="password1" required></p>
<p>Password2: <input type="password" name="password2" required></p>
<input type="submit" required>
</form>""")
        self.assertHTMLEqual(t.render(Context({'form': UserRegistration(auto_id='id_%s')})), """<form>
<p><label for="id_username">Username:</label>
<input id="id_username" type="text" name="username" maxlength="10" required></p>
<p><label for="id_password1">Password1:</label>
<input type="password" name="password1" id="id_password1" required></p>
<p><label for="id_password2">Password2:</label>
<input type="password" name="password2" id="id_password2" required></p>
<input type="submit" required>
</form>""")

        # Use form.[field].help_text to output a field's help text. If the given field
        # does not have help text, nothing will be output.
        t = Template('''<form>
<p>{{ form.username.label_tag }} {{ form.username }}<br>{{ form.username.help_text }}</p>
<p>{{ form.password1.label_tag }} {{ form.password1 }}</p>
<p>{{ form.password2.label_tag }} {{ form.password2 }}</p>
<input type="submit" required>
</form>''')
        self.assertHTMLEqual(
            t.render(Context({'form': UserRegistration(auto_id=False)})),
            """<form>
<p>Username: <input type="text" name="username" maxlength="10" required><br>
Good luck picking a username that doesn&#x27;t already exist.</p>
<p>Password1: <input type="password" name="password1" required></p>
<p>Password2: <input type="password" name="password2" required></p>
<input type="submit" required>
</form>"""
        )
        self.assertEqual(
            Template('{{ form.password1.help_text }}').render(Context({'form': UserRegistration(auto_id=False)})),
            ''
        )

        # To display the errors that aren't associated with a particular field -- e.g.,
        # the errors caused by Form.clean() -- use {{ form.non_field_errors }} in the
        # template. If used on its own, it is displayed as a <ul> (or an empty string, if
        # the list of errors is empty). You can also use it in {% if %} statements.
        t = Template('''<form>
{{ form.username.errors.as_ul }}<p><label>Your username: {{ form.username }}</label></p>
{{ form.password1.errors.as_ul }}<p><label>Password: {{ form.password1 }}</label></p>
{{ form.password2.errors.as_ul }}<p><label>Password (again): {{ form.password2 }}</label></p>
<input type="submit" required>
</form>''')
        self.assertHTMLEqual(
            t.render(Context({
                'form': UserRegistration({'username': 'django', 'password1': 'foo', 'password2': 'bar'}, auto_id=False)
            })),
            """<form>
<p><label>Your username: <input type="text" name="username" value="django" maxlength="10" required></label></p>
<p><label>Password: <input type="password" name="password1" required></label></p>
<p><label>Password (again): <input type="password" name="password2" required></label></p>
<input type="submit" required>
</form>"""
        )
        t = Template('''<form>
{{ form.non_field_errors }}
{{ form.username.errors.as_ul }}<p><label>Your username: {{ form.username }}</label></p>
{{ form.password1.errors.as_ul }}<p><label>Password: {{ form.password1 }}</label></p>
{{ form.password2.errors.as_ul }}<p><label>Password (again): {{ form.password2 }}</label></p>
<input type="submit" required>
</form>''')
        self.assertHTMLEqual(
            t.render(Context({
                'form': UserRegistration({'username': 'django', 'password1': 'foo', 'password2': 'bar'}, auto_id=False)
            })),
            """<form>
<ul class="errorlist nonfield"><li>Please make sure your passwords match.</li></ul>
<p><label>Your username: <input type="text" name="username" value="django" maxlength="10" required></label></p>
<p><label>Password: <input type="password" name="password1" required></label></p>
<p><label>Password (again): <input type="password" name="password2" required></label></p>
<input type="submit" required>
</form>"""
        )

    def test_empty_permitted(self):
        # Sometimes (pretty much in formsets) we want to allow a form to pass validation
        # if it is completely empty. We can accomplish this by using the empty_permitted
        # argument to a form constructor.
        class SongForm(Form):
            artist = CharField()
            name = CharField()

        # First let's show what happens if empty_permitted=False (the default):
        data = {'artist': '', 'song': ''}
        form = SongForm(data, empty_permitted=False)
        self.assertFalse(form.is_valid())
        self.assertEqual(form.errors, {'name': ['This field is required.'], 'artist': ['This field is required.']})
        self.assertEqual(form.cleaned_data, {})

        # Now let's show what happens when empty_permitted=True and the form is empty.
        form = SongForm(data, empty_permitted=True, use_required_attribute=False)
        self.assertTrue(form.is_valid())
        self.assertEqual(form.errors, {})
        self.assertEqual(form.cleaned_data, {})

        # But if we fill in data for one of the fields, the form is no longer empty and
        # the whole thing must pass validation.
        data = {'artist': 'The Doors', 'song': ''}
        form = SongForm(data, empty_permitted=False)
        self.assertFalse(form.is_valid())
        self.assertEqual(form.errors, {'name': ['This field is required.']})
        self.assertEqual(form.cleaned_data, {'artist': 'The Doors'})

        # If a field is not given in the data then None is returned for its data. Let's
        # make sure that when checking for empty_permitted that None is treated
        # accordingly.
        data = {'artist': None, 'song': ''}
        form = SongForm(data, empty_permitted=True, use_required_attribute=False)
        self.assertTrue(form.is_valid())

        # However, we *really* need to be sure we are checking for None as any data in
        # initial that returns False on a boolean call needs to be treated literally.
class PriceForm(Form): amount = FloatField() qty = IntegerField() data = {'amount': '0.0', 'qty': ''} form = PriceForm(data, initial={'amount': 0.0}, empty_permitted=True, use_required_attribute=False) self.assertTrue(form.is_valid()) def test_empty_permitted_and_use_required_attribute(self): msg = ( 'The empty_permitted and use_required_attribute arguments may not ' 'both be True.' ) with self.assertRaisesMessage(ValueError, msg): Person(empty_permitted=True, use_required_attribute=True) def test_extracting_hidden_and_visible(self): class SongForm(Form): token = CharField(widget=HiddenInput) artist = CharField() name = CharField() form = SongForm() self.assertEqual([f.name for f in form.hidden_fields()], ['token']) self.assertEqual([f.name for f in form.visible_fields()], ['artist', 'name']) def test_hidden_initial_gets_id(self): class MyForm(Form): field1 = CharField(max_length=50, show_hidden_initial=True) self.assertHTMLEqual( MyForm().as_table(), '<tr><th><label for="id_field1">Field1:</label></th>' '<td><input id="id_field1" type="text" name="field1" maxlength="50" required>' '<input type="hidden" name="initial-field1" id="initial-id_field1"></td></tr>' ) def test_error_html_required_html_classes(self): class Person(Form): name = CharField() is_cool = NullBooleanField() email = EmailField(required=False) age = IntegerField() p = Person({}) p.error_css_class = 'error' p.required_css_class = 'required' self.assertHTMLEqual( p.as_ul(), """<li class="required error"><ul class="errorlist"><li>This field is required.</li></ul> <label class="required" for="id_name">Name:</label> <input type="text" name="name" id="id_name" required></li> <li class="required"><label class="required" for="id_is_cool">Is cool:</label> <select name="is_cool" id="id_is_cool"> <option value="unknown" selected>Unknown</option> <option value="true">Yes</option> <option value="false">No</option> </select></li> <li><label for="id_email">Email:</label> <input type="email" name="email" id="id_email"></li> <li class="required error"><ul class="errorlist"><li>This field is required.</li></ul> <label class="required" for="id_age">Age:</label> <input type="number" name="age" id="id_age" required></li>""" ) self.assertHTMLEqual( p.as_p(), """<ul class="errorlist"><li>This field is required.</li></ul> <p class="required error"><label class="required" for="id_name">Name:</label> <input type="text" name="name" id="id_name" required></p> <p class="required"><label class="required" for="id_is_cool">Is cool:</label> <select name="is_cool" id="id_is_cool"> <option value="unknown" selected>Unknown</option> <option value="true">Yes</option> <option value="false">No</option> </select></p> <p><label for="id_email">Email:</label> <input type="email" name="email" id="id_email"></p> <ul class="errorlist"><li>This field is required.</li></ul> <p class="required error"><label class="required" for="id_age">Age:</label> <input type="number" name="age" id="id_age" required></p>""" ) self.assertHTMLEqual( p.as_table(), """<tr class="required error"> <th><label class="required" for="id_name">Name:</label></th> <td><ul class="errorlist"><li>This field is required.</li></ul> <input type="text" name="name" id="id_name" required></td></tr> <tr class="required"><th><label class="required" for="id_is_cool">Is cool:</label></th> <td><select name="is_cool" id="id_is_cool"> <option value="unknown" selected>Unknown</option> <option value="true">Yes</option> <option value="false">No</option> </select></td></tr> <tr><th><label 
for="id_email">Email:</label></th><td> <input type="email" name="email" id="id_email"></td></tr> <tr class="required error"><th><label class="required" for="id_age">Age:</label></th> <td><ul class="errorlist"><li>This field is required.</li></ul> <input type="number" name="age" id="id_age" required></td></tr>""" ) def test_label_has_required_css_class(self): """ #17922 - required_css_class is added to the label_tag() of required fields. """ class SomeForm(Form): required_css_class = 'required' field = CharField(max_length=10) field2 = IntegerField(required=False) f = SomeForm({'field': 'test'}) self.assertHTMLEqual(f['field'].label_tag(), '<label for="id_field" class="required">Field:</label>') self.assertHTMLEqual( f['field'].label_tag(attrs={'class': 'foo'}), '<label for="id_field" class="foo required">Field:</label>' ) self.assertHTMLEqual(f['field2'].label_tag(), '<label for="id_field2">Field2:</label>') def test_label_split_datetime_not_displayed(self): class EventForm(Form): happened_at = SplitDateTimeField(widget=SplitHiddenDateTimeWidget) form = EventForm() self.assertHTMLEqual( form.as_ul(), '<input type="hidden" name="happened_at_0" id="id_happened_at_0">' '<input type="hidden" name="happened_at_1" id="id_happened_at_1">' ) def test_multivalue_field_validation(self): def bad_names(value): if value == 'bad value': raise ValidationError('bad value not allowed') class NameField(MultiValueField): def __init__(self, fields=(), *args, **kwargs): fields = (CharField(label='First name', max_length=10), CharField(label='Last name', max_length=10)) super().__init__(fields=fields, *args, **kwargs) def compress(self, data_list): return ' '.join(data_list) class NameForm(Form): name = NameField(validators=[bad_names]) form = NameForm(data={'name': ['bad', 'value']}) form.full_clean() self.assertFalse(form.is_valid()) self.assertEqual(form.errors, {'name': ['bad value not allowed']}) form = NameForm(data={'name': ['should be overly', 'long for the field names']}) self.assertFalse(form.is_valid()) self.assertEqual( form.errors, { 'name': [ 'Ensure this value has at most 10 characters (it has 16).', 'Ensure this value has at most 10 characters (it has 24).', ], } ) form = NameForm(data={'name': ['fname', 'lname']}) self.assertTrue(form.is_valid()) self.assertEqual(form.cleaned_data, {'name': 'fname lname'}) def test_multivalue_deep_copy(self): """ #19298 -- MultiValueField needs to override the default as it needs to deep-copy subfields: """ class ChoicesField(MultiValueField): def __init__(self, fields=(), *args, **kwargs): fields = ( ChoiceField(label='Rank', choices=((1, 1), (2, 2))), CharField(label='Name', max_length=10), ) super().__init__(fields=fields, *args, **kwargs) field = ChoicesField() field2 = copy.deepcopy(field) self.assertIsInstance(field2, ChoicesField) self.assertIsNot(field2.fields, field.fields) self.assertIsNot(field2.fields[0].choices, field.fields[0].choices) def test_multivalue_initial_data(self): """ #23674 -- invalid initial data should not break form.changed_data() """ class DateAgeField(MultiValueField): def __init__(self, fields=(), *args, **kwargs): fields = (DateField(label="Date"), IntegerField(label="Age")) super().__init__(fields=fields, *args, **kwargs) class DateAgeForm(Form): date_age = DateAgeField() data = {"date_age": ["1998-12-06", 16]} form = DateAgeForm(data, initial={"date_age": ["200-10-10", 14]}) self.assertTrue(form.has_changed()) def test_multivalue_optional_subfields(self): class PhoneField(MultiValueField): def __init__(self, *args, **kwargs): 
fields = ( CharField(label='Country Code', validators=[ RegexValidator(r'^\+[0-9]{1,2}$', message='Enter a valid country code.')]), CharField(label='Phone Number'), CharField(label='Extension', error_messages={'incomplete': 'Enter an extension.'}), CharField(label='Label', required=False, help_text='E.g. home, work.'), ) super().__init__(fields, *args, **kwargs) def compress(self, data_list): if data_list: return '%s.%s ext. %s (label: %s)' % tuple(data_list) return None # An empty value for any field will raise a `required` error on a # required `MultiValueField`. f = PhoneField() with self.assertRaisesMessage(ValidationError, "'This field is required.'"): f.clean('') with self.assertRaisesMessage(ValidationError, "'This field is required.'"): f.clean(None) with self.assertRaisesMessage(ValidationError, "'This field is required.'"): f.clean([]) with self.assertRaisesMessage(ValidationError, "'This field is required.'"): f.clean(['+61']) with self.assertRaisesMessage(ValidationError, "'This field is required.'"): f.clean(['+61', '287654321', '123']) self.assertEqual('+61.287654321 ext. 123 (label: Home)', f.clean(['+61', '287654321', '123', 'Home'])) with self.assertRaisesMessage(ValidationError, "'Enter a valid country code.'"): f.clean(['61', '287654321', '123', 'Home']) # Empty values for fields will NOT raise a `required` error on an # optional `MultiValueField` f = PhoneField(required=False) self.assertIsNone(f.clean('')) self.assertIsNone(f.clean(None)) self.assertIsNone(f.clean([])) self.assertEqual('+61. ext. (label: )', f.clean(['+61'])) self.assertEqual('+61.287654321 ext. 123 (label: )', f.clean(['+61', '287654321', '123'])) self.assertEqual('+61.287654321 ext. 123 (label: Home)', f.clean(['+61', '287654321', '123', 'Home'])) with self.assertRaisesMessage(ValidationError, "'Enter a valid country code.'"): f.clean(['61', '287654321', '123', 'Home']) # For a required `MultiValueField` with `require_all_fields=False`, a # `required` error will only be raised if all fields are empty. Fields # can individually be required or optional. An empty value for any # required field will raise an `incomplete` error. f = PhoneField(require_all_fields=False) with self.assertRaisesMessage(ValidationError, "'This field is required.'"): f.clean('') with self.assertRaisesMessage(ValidationError, "'This field is required.'"): f.clean(None) with self.assertRaisesMessage(ValidationError, "'This field is required.'"): f.clean([]) with self.assertRaisesMessage(ValidationError, "'Enter a complete value.'"): f.clean(['+61']) self.assertEqual('+61.287654321 ext. 123 (label: )', f.clean(['+61', '287654321', '123'])) with self.assertRaisesMessage(ValidationError, "'Enter a complete value.', 'Enter an extension.'"): f.clean(['', '', '', 'Home']) with self.assertRaisesMessage(ValidationError, "'Enter a valid country code.'"): f.clean(['61', '287654321', '123', 'Home']) # For an optional `MultiValueField` with `require_all_fields=False`, we # don't get any `required` error but we still get `incomplete` errors. f = PhoneField(required=False, require_all_fields=False) self.assertIsNone(f.clean('')) self.assertIsNone(f.clean(None)) self.assertIsNone(f.clean([])) with self.assertRaisesMessage(ValidationError, "'Enter a complete value.'"): f.clean(['+61']) self.assertEqual('+61.287654321 ext. 
123 (label: )', f.clean(['+61', '287654321', '123'])) with self.assertRaisesMessage(ValidationError, "'Enter a complete value.', 'Enter an extension.'"): f.clean(['', '', '', 'Home']) with self.assertRaisesMessage(ValidationError, "'Enter a valid country code.'"): f.clean(['61', '287654321', '123', 'Home']) def test_custom_empty_values(self): """ Form fields can customize what is considered as an empty value for themselves (#19997). """ class CustomJSONField(CharField): empty_values = [None, ''] def to_python(self, value): # Fake json.loads if value == '{}': return {} return super().to_python(value) class JSONForm(forms.Form): json = CustomJSONField() form = JSONForm(data={'json': '{}'}) form.full_clean() self.assertEqual(form.cleaned_data, {'json': {}}) def test_boundfield_label_tag(self): class SomeForm(Form): field = CharField() boundfield = SomeForm()['field'] testcases = [ # (args, kwargs, expected) # without anything: just print the <label> ((), {}, '<label for="id_field">Field:</label>'), # passing just one argument: overrides the field's label (('custom',), {}, '<label for="id_field">custom:</label>'), # the overridden label is escaped (('custom&',), {}, '<label for="id_field">custom&amp;:</label>'), ((mark_safe('custom&'),), {}, '<label for="id_field">custom&:</label>'), # Passing attrs to add extra attributes on the <label> ((), {'attrs': {'class': 'pretty'}}, '<label for="id_field" class="pretty">Field:</label>') ] for args, kwargs, expected in testcases: with self.subTest(args=args, kwargs=kwargs): self.assertHTMLEqual(boundfield.label_tag(*args, **kwargs), expected) def test_boundfield_label_tag_no_id(self): """ If a widget has no id, label_tag just returns the text with no surrounding <label>. """ class SomeForm(Form): field = CharField() boundfield = SomeForm(auto_id='')['field'] self.assertHTMLEqual(boundfield.label_tag(), 'Field:') self.assertHTMLEqual(boundfield.label_tag('Custom&'), 'Custom&amp;:') def test_boundfield_label_tag_custom_widget_id_for_label(self): class CustomIdForLabelTextInput(TextInput): def id_for_label(self, id): return 'custom_' + id class EmptyIdForLabelTextInput(TextInput): def id_for_label(self, id): return None class SomeForm(Form): custom = CharField(widget=CustomIdForLabelTextInput) empty = CharField(widget=EmptyIdForLabelTextInput) form = SomeForm() self.assertHTMLEqual(form['custom'].label_tag(), '<label for="custom_id_custom">Custom:</label>') self.assertHTMLEqual(form['empty'].label_tag(), '<label>Empty:</label>') def test_boundfield_empty_label(self): class SomeForm(Form): field = CharField(label='') boundfield = SomeForm()['field'] self.assertHTMLEqual(boundfield.label_tag(), '<label for="id_field"></label>') def test_boundfield_id_for_label(self): class SomeForm(Form): field = CharField(label='') self.assertEqual(SomeForm()['field'].id_for_label, 'id_field') def test_boundfield_id_for_label_override_by_attrs(self): """ If an id is provided in `Widget.attrs`, it overrides the generated ID, unless it is `None`. 
""" class SomeForm(Form): field = CharField(widget=TextInput(attrs={'id': 'myCustomID'})) field_none = CharField(widget=TextInput(attrs={'id': None})) form = SomeForm() self.assertEqual(form['field'].id_for_label, 'myCustomID') self.assertEqual(form['field_none'].id_for_label, 'id_field_none') def test_label_tag_override(self): """ BoundField label_suffix (if provided) overrides Form label_suffix """ class SomeForm(Form): field = CharField() boundfield = SomeForm(label_suffix='!')['field'] self.assertHTMLEqual(boundfield.label_tag(label_suffix='$'), '<label for="id_field">Field$</label>') def test_field_name(self): """#5749 - `field_name` may be used as a key in _html_output().""" class SomeForm(Form): some_field = CharField() def as_p(self): return self._html_output( normal_row='<p id="p_%(field_name)s"></p>', error_row='%s', row_ender='</p>', help_text_html=' %s', errors_on_separate_row=True, ) form = SomeForm() self.assertHTMLEqual(form.as_p(), '<p id="p_some_field"></p>') def test_field_without_css_classes(self): """ `css_classes` may be used as a key in _html_output() (empty classes). """ class SomeForm(Form): some_field = CharField() def as_p(self): return self._html_output( normal_row='<p class="%(css_classes)s"></p>', error_row='%s', row_ender='</p>', help_text_html=' %s', errors_on_separate_row=True, ) form = SomeForm() self.assertHTMLEqual(form.as_p(), '<p class=""></p>') def test_field_with_css_class(self): """ `css_classes` may be used as a key in _html_output() (class comes from required_css_class in this case). """ class SomeForm(Form): some_field = CharField() required_css_class = 'foo' def as_p(self): return self._html_output( normal_row='<p class="%(css_classes)s"></p>', error_row='%s', row_ender='</p>', help_text_html=' %s', errors_on_separate_row=True, ) form = SomeForm() self.assertHTMLEqual(form.as_p(), '<p class="foo"></p>') def test_field_name_with_hidden_input(self): """ BaseForm._html_output() should merge all the hidden input fields and put them in the last row. """ class SomeForm(Form): hidden1 = CharField(widget=HiddenInput) custom = CharField() hidden2 = CharField(widget=HiddenInput) def as_p(self): return self._html_output( normal_row='<p%(html_class_attr)s>%(field)s %(field_name)s</p>', error_row='%s', row_ender='</p>', help_text_html=' %s', errors_on_separate_row=True, ) form = SomeForm() self.assertHTMLEqual( form.as_p(), '<p><input id="id_custom" name="custom" type="text" required> custom' '<input id="id_hidden1" name="hidden1" type="hidden">' '<input id="id_hidden2" name="hidden2" type="hidden"></p>' ) def test_field_name_with_hidden_input_and_non_matching_row_ender(self): """ BaseForm._html_output() should merge all the hidden input fields and put them in the last row ended with the specific row ender. 
""" class SomeForm(Form): hidden1 = CharField(widget=HiddenInput) custom = CharField() hidden2 = CharField(widget=HiddenInput) def as_p(self): return self._html_output( normal_row='<p%(html_class_attr)s>%(field)s %(field_name)s</p>', error_row='%s', row_ender='<hr><hr>', help_text_html=' %s', errors_on_separate_row=True ) form = SomeForm() self.assertHTMLEqual( form.as_p(), '<p><input id="id_custom" name="custom" type="text" required> custom</p>\n' '<input id="id_hidden1" name="hidden1" type="hidden">' '<input id="id_hidden2" name="hidden2" type="hidden"><hr><hr>' ) def test_error_dict(self): class MyForm(Form): foo = CharField() bar = CharField() def clean(self): raise ValidationError('Non-field error.', code='secret', params={'a': 1, 'b': 2}) form = MyForm({}) self.assertIs(form.is_valid(), False) errors = form.errors.as_text() control = [ '* foo\n * This field is required.', '* bar\n * This field is required.', '* __all__\n * Non-field error.', ] for error in control: self.assertIn(error, errors) errors = form.errors.as_ul() control = [ '<li>foo<ul class="errorlist"><li>This field is required.</li></ul></li>', '<li>bar<ul class="errorlist"><li>This field is required.</li></ul></li>', '<li>__all__<ul class="errorlist nonfield"><li>Non-field error.</li></ul></li>', ] for error in control: self.assertInHTML(error, errors) errors = form.errors.get_json_data() control = { 'foo': [{'code': 'required', 'message': 'This field is required.'}], 'bar': [{'code': 'required', 'message': 'This field is required.'}], '__all__': [{'code': 'secret', 'message': 'Non-field error.'}] } self.assertEqual(errors, control) self.assertEqual(json.dumps(errors), form.errors.as_json()) def test_error_dict_as_json_escape_html(self): """#21962 - adding html escape flag to ErrorDict""" class MyForm(Form): foo = CharField() bar = CharField() def clean(self): raise ValidationError( '<p>Non-field error.</p>', code='secret', params={'a': 1, 'b': 2}, ) control = { 'foo': [{'code': 'required', 'message': 'This field is required.'}], 'bar': [{'code': 'required', 'message': 'This field is required.'}], '__all__': [{'code': 'secret', 'message': '<p>Non-field error.</p>'}] } form = MyForm({}) self.assertFalse(form.is_valid()) errors = json.loads(form.errors.as_json()) self.assertEqual(errors, control) escaped_error = '&lt;p&gt;Non-field error.&lt;/p&gt;' self.assertEqual( form.errors.get_json_data(escape_html=True)['__all__'][0]['message'], escaped_error ) errors = json.loads(form.errors.as_json(escape_html=True)) control['__all__'][0]['message'] = escaped_error self.assertEqual(errors, control) def test_error_list(self): e = ErrorList() e.append('Foo') e.append(ValidationError('Foo%(bar)s', code='foobar', params={'bar': 'bar'})) self.assertIsInstance(e, list) self.assertIn('Foo', e) self.assertIn('Foo', forms.ValidationError(e)) self.assertEqual( e.as_text(), '* Foo\n* Foobar' ) self.assertEqual( e.as_ul(), '<ul class="errorlist"><li>Foo</li><li>Foobar</li></ul>' ) errors = e.get_json_data() self.assertEqual( errors, [{"message": "Foo", "code": ""}, {"message": "Foobar", "code": "foobar"}] ) self.assertEqual(json.dumps(errors), e.as_json()) def test_error_list_class_not_specified(self): e = ErrorList() e.append('Foo') e.append(ValidationError('Foo%(bar)s', code='foobar', params={'bar': 'bar'})) self.assertEqual( e.as_ul(), '<ul class="errorlist"><li>Foo</li><li>Foobar</li></ul>' ) def test_error_list_class_has_one_class_specified(self): e = ErrorList(error_class='foobar-error-class') e.append('Foo') 
e.append(ValidationError('Foo%(bar)s', code='foobar', params={'bar': 'bar'})) self.assertEqual( e.as_ul(), '<ul class="errorlist foobar-error-class"><li>Foo</li><li>Foobar</li></ul>' ) def test_error_list_with_hidden_field_errors_has_correct_class(self): class Person(Form): first_name = CharField() last_name = CharField(widget=HiddenInput) p = Person({'first_name': 'John'}) self.assertHTMLEqual( p.as_ul(), """<li><ul class="errorlist nonfield"> <li>(Hidden field last_name) This field is required.</li></ul></li><li> <label for="id_first_name">First name:</label> <input id="id_first_name" name="first_name" type="text" value="John" required> <input id="id_last_name" name="last_name" type="hidden"></li>""" ) self.assertHTMLEqual( p.as_p(), """<ul class="errorlist nonfield"><li>(Hidden field last_name) This field is required.</li></ul> <p><label for="id_first_name">First name:</label> <input id="id_first_name" name="first_name" type="text" value="John" required> <input id="id_last_name" name="last_name" type="hidden"></p>""" ) self.assertHTMLEqual( p.as_table(), """<tr><td colspan="2"><ul class="errorlist nonfield"> <li>(Hidden field last_name) This field is required.</li></ul></td></tr> <tr><th><label for="id_first_name">First name:</label></th><td> <input id="id_first_name" name="first_name" type="text" value="John" required> <input id="id_last_name" name="last_name" type="hidden"></td></tr>""" ) def test_error_list_with_non_field_errors_has_correct_class(self): class Person(Form): first_name = CharField() last_name = CharField() def clean(self): raise ValidationError('Generic validation error') p = Person({'first_name': 'John', 'last_name': 'Lennon'}) self.assertHTMLEqual( str(p.non_field_errors()), '<ul class="errorlist nonfield"><li>Generic validation error</li></ul>' ) self.assertHTMLEqual( p.as_ul(), """<li> <ul class="errorlist nonfield"><li>Generic validation error</li></ul></li> <li><label for="id_first_name">First name:</label> <input id="id_first_name" name="first_name" type="text" value="John" required></li> <li><label for="id_last_name">Last name:</label> <input id="id_last_name" name="last_name" type="text" value="Lennon" required></li>""" ) self.assertHTMLEqual( p.non_field_errors().as_text(), '* Generic validation error' ) self.assertHTMLEqual( p.as_p(), """<ul class="errorlist nonfield"><li>Generic validation error</li></ul> <p><label for="id_first_name">First name:</label> <input id="id_first_name" name="first_name" type="text" value="John" required></p> <p><label for="id_last_name">Last name:</label> <input id="id_last_name" name="last_name" type="text" value="Lennon" required></p>""" ) self.assertHTMLEqual( p.as_table(), """<tr><td colspan="2"><ul class="errorlist nonfield"><li>Generic validation error</li></ul></td></tr> <tr><th><label for="id_first_name">First name:</label></th><td> <input id="id_first_name" name="first_name" type="text" value="John" required></td></tr> <tr><th><label for="id_last_name">Last name:</label></th><td> <input id="id_last_name" name="last_name" type="text" value="Lennon" required></td></tr>""" ) def test_errorlist_override(self): class DivErrorList(ErrorList): def __str__(self): return self.as_divs() def as_divs(self): if not self: return '' return '<div class="errorlist">%s</div>' % ''.join( '<div class="error">%s</div>' % e for e in self) class CommentForm(Form): name = CharField(max_length=50, required=False) email = EmailField() comment = CharField() data = {'email': 'invalid'} f = CommentForm(data, auto_id=False, error_class=DivErrorList) 
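        # An illustrative aside (not part of the upstream test; it assumes the
        # documented error_class behavior): each field's error list is built
        # from the overridden class, which is why as_p() below renders the
        # <div class="error"> markup produced by DivErrorList.as_divs().
        self.assertIsInstance(f.errors['email'], DivErrorList)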
self.assertHTMLEqual(f.as_p(), """<p>Name: <input type="text" name="name" maxlength="50"></p> <div class="errorlist"><div class="error">Enter a valid email address.</div></div> <p>Email: <input type="email" name="email" value="invalid" required></p> <div class="errorlist"><div class="error">This field is required.</div></div> <p>Comment: <input type="text" name="comment" required></p>""") def test_error_escaping(self): class TestForm(Form): hidden = CharField(widget=HiddenInput(), required=False) visible = CharField() def clean_hidden(self): raise ValidationError('Foo & "bar"!') clean_visible = clean_hidden form = TestForm({'hidden': 'a', 'visible': 'b'}) form.is_valid() self.assertHTMLEqual( form.as_ul(), '<li><ul class="errorlist nonfield"><li>(Hidden field hidden) Foo &amp; &quot;bar&quot;!</li></ul></li>' '<li><ul class="errorlist"><li>Foo &amp; &quot;bar&quot;!</li></ul>' '<label for="id_visible">Visible:</label> ' '<input type="text" name="visible" value="b" id="id_visible" required>' '<input type="hidden" name="hidden" value="a" id="id_hidden"></li>' ) def test_baseform_repr(self): """ BaseForm.__repr__() should contain some basic information about the form. """ p = Person() self.assertEqual(repr(p), "<Person bound=False, valid=Unknown, fields=(first_name;last_name;birthday)>") p = Person({'first_name': 'John', 'last_name': 'Lennon', 'birthday': '1940-10-9'}) self.assertEqual(repr(p), "<Person bound=True, valid=Unknown, fields=(first_name;last_name;birthday)>") p.is_valid() self.assertEqual(repr(p), "<Person bound=True, valid=True, fields=(first_name;last_name;birthday)>") p = Person({'first_name': 'John', 'last_name': 'Lennon', 'birthday': 'fakedate'}) p.is_valid() self.assertEqual(repr(p), "<Person bound=True, valid=False, fields=(first_name;last_name;birthday)>") def test_baseform_repr_dont_trigger_validation(self): """ BaseForm.__repr__() shouldn't trigger the form validation. """ p = Person({'first_name': 'John', 'last_name': 'Lennon', 'birthday': 'fakedate'}) repr(p) with self.assertRaises(AttributeError): p.cleaned_data self.assertFalse(p.is_valid()) self.assertEqual(p.cleaned_data, {'first_name': 'John', 'last_name': 'Lennon'}) def test_accessing_clean(self): class UserForm(Form): username = CharField(max_length=10) password = CharField(widget=PasswordInput) def clean(self): data = self.cleaned_data if not self.errors: data['username'] = data['username'].lower() return data f = UserForm({'username': 'SirRobin', 'password': 'blue'}) self.assertTrue(f.is_valid()) self.assertEqual(f.cleaned_data['username'], 'sirrobin') def test_changing_cleaned_data_nothing_returned(self): class UserForm(Form): username = CharField(max_length=10) password = CharField(widget=PasswordInput) def clean(self): self.cleaned_data['username'] = self.cleaned_data['username'].lower() # don't return anything f = UserForm({'username': 'SirRobin', 'password': 'blue'}) self.assertTrue(f.is_valid()) self.assertEqual(f.cleaned_data['username'], 'sirrobin') def test_changing_cleaned_data_in_clean(self): class UserForm(Form): username = CharField(max_length=10) password = CharField(widget=PasswordInput) def clean(self): data = self.cleaned_data # Return a different dict. We have not changed self.cleaned_data. 
return { 'username': data['username'].lower(), 'password': 'this_is_not_a_secret', } f = UserForm({'username': 'SirRobin', 'password': 'blue'}) self.assertTrue(f.is_valid()) self.assertEqual(f.cleaned_data['username'], 'sirrobin') def test_multipart_encoded_form(self): class FormWithoutFile(Form): username = CharField() class FormWithFile(Form): username = CharField() file = FileField() class FormWithImage(Form): image = ImageField() self.assertFalse(FormWithoutFile().is_multipart()) self.assertTrue(FormWithFile().is_multipart()) self.assertTrue(FormWithImage().is_multipart()) def test_html_safe(self): class SimpleForm(Form): username = CharField() form = SimpleForm() self.assertTrue(hasattr(SimpleForm, '__html__')) self.assertEqual(str(form), form.__html__()) self.assertTrue(hasattr(form['username'], '__html__')) self.assertEqual(str(form['username']), form['username'].__html__()) def test_use_required_attribute_true(self): class MyForm(Form): use_required_attribute = True f1 = CharField(max_length=30) f2 = CharField(max_length=30, required=False) f3 = CharField(widget=Textarea) f4 = ChoiceField(choices=[('P', 'Python'), ('J', 'Java')]) form = MyForm() self.assertHTMLEqual( form.as_p(), '<p><label for="id_f1">F1:</label> <input id="id_f1" maxlength="30" name="f1" type="text" required></p>' '<p><label for="id_f2">F2:</label> <input id="id_f2" maxlength="30" name="f2" type="text"></p>' '<p><label for="id_f3">F3:</label> <textarea cols="40" id="id_f3" name="f3" rows="10" required>' '</textarea></p>' '<p><label for="id_f4">F4:</label> <select id="id_f4" name="f4">' '<option value="P">Python</option>' '<option value="J">Java</option>' '</select></p>', ) self.assertHTMLEqual( form.as_ul(), '<li><label for="id_f1">F1:</label> ' '<input id="id_f1" maxlength="30" name="f1" type="text" required></li>' '<li><label for="id_f2">F2:</label> <input id="id_f2" maxlength="30" name="f2" type="text"></li>' '<li><label for="id_f3">F3:</label> <textarea cols="40" id="id_f3" name="f3" rows="10" required>' '</textarea></li>' '<li><label for="id_f4">F4:</label> <select id="id_f4" name="f4">' '<option value="P">Python</option>' '<option value="J">Java</option>' '</select></li>', ) self.assertHTMLEqual( form.as_table(), '<tr><th><label for="id_f1">F1:</label></th>' '<td><input id="id_f1" maxlength="30" name="f1" type="text" required></td></tr>' '<tr><th><label for="id_f2">F2:</label></th>' '<td><input id="id_f2" maxlength="30" name="f2" type="text"></td></tr>' '<tr><th><label for="id_f3">F3:</label></th>' '<td><textarea cols="40" id="id_f3" name="f3" rows="10" required>' '</textarea></td></tr>' '<tr><th><label for="id_f4">F4:</label></th><td><select id="id_f4" name="f4">' '<option value="P">Python</option>' '<option value="J">Java</option>' '</select></td></tr>', ) def test_use_required_attribute_false(self): class MyForm(Form): use_required_attribute = False f1 = CharField(max_length=30) f2 = CharField(max_length=30, required=False) f3 = CharField(widget=Textarea) f4 = ChoiceField(choices=[('P', 'Python'), ('J', 'Java')]) form = MyForm() self.assertHTMLEqual( form.as_p(), '<p><label for="id_f1">F1:</label> <input id="id_f1" maxlength="30" name="f1" type="text"></p>' '<p><label for="id_f2">F2:</label> <input id="id_f2" maxlength="30" name="f2" type="text"></p>' '<p><label for="id_f3">F3:</label> <textarea cols="40" id="id_f3" name="f3" rows="10">' '</textarea></p>' '<p><label for="id_f4">F4:</label> <select id="id_f4" name="f4">' '<option value="P">Python</option>' '<option value="J">Java</option>' 
'</select></p>', ) self.assertHTMLEqual( form.as_ul(), '<li><label for="id_f1">F1:</label> <input id="id_f1" maxlength="30" name="f1" type="text"></li>' '<li><label for="id_f2">F2:</label> <input id="id_f2" maxlength="30" name="f2" type="text"></li>' '<li><label for="id_f3">F3:</label> <textarea cols="40" id="id_f3" name="f3" rows="10">' '</textarea></li>' '<li><label for="id_f4">F4:</label> <select id="id_f4" name="f4">' '<option value="P">Python</option>' '<option value="J">Java</option>' '</select></li>', ) self.assertHTMLEqual( form.as_table(), '<tr><th><label for="id_f1">F1:</label></th>' '<td><input id="id_f1" maxlength="30" name="f1" type="text"></td></tr>' '<tr><th><label for="id_f2">F2:</label></th>' '<td><input id="id_f2" maxlength="30" name="f2" type="text"></td></tr>' '<tr><th><label for="id_f3">F3:</label></th><td><textarea cols="40" id="id_f3" name="f3" rows="10">' '</textarea></td></tr>' '<tr><th><label for="id_f4">F4:</label></th><td><select id="id_f4" name="f4">' '<option value="P">Python</option>' '<option value="J">Java</option>' '</select></td></tr>', ) def test_only_hidden_fields(self): # A form with *only* hidden fields that has errors is going to be very unusual. class HiddenForm(Form): data = IntegerField(widget=HiddenInput) f = HiddenForm({}) self.assertHTMLEqual( f.as_p(), '<ul class="errorlist nonfield">' '<li>(Hidden field data) This field is required.</li></ul>\n<p> ' '<input type="hidden" name="data" id="id_data"></p>' ) self.assertHTMLEqual( f.as_table(), '<tr><td colspan="2"><ul class="errorlist nonfield">' '<li>(Hidden field data) This field is required.</li></ul>' '<input type="hidden" name="data" id="id_data"></td></tr>' ) def test_field_named_data(self): class DataForm(Form): data = CharField(max_length=10) f = DataForm({'data': 'xyzzy'}) self.assertTrue(f.is_valid()) self.assertEqual(f.cleaned_data, {'data': 'xyzzy'}) def test_empty_data_files_multi_value_dict(self): p = Person() self.assertIsInstance(p.data, MultiValueDict) self.assertIsInstance(p.files, MultiValueDict) class CustomRenderer(DjangoTemplates): pass class RendererTests(SimpleTestCase): def test_default(self): form = Form() self.assertEqual(form.renderer, get_default_renderer()) def test_kwarg_instance(self): custom = CustomRenderer() form = Form(renderer=custom) self.assertEqual(form.renderer, custom) def test_kwarg_class(self): custom = CustomRenderer() form = Form(renderer=custom) self.assertEqual(form.renderer, custom) def test_attribute_instance(self): class CustomForm(Form): default_renderer = DjangoTemplates() form = CustomForm() self.assertEqual(form.renderer, CustomForm.default_renderer) def test_attribute_class(self): class CustomForm(Form): default_renderer = CustomRenderer form = CustomForm() self.assertTrue(isinstance(form.renderer, CustomForm.default_renderer)) def test_attribute_override(self): class CustomForm(Form): default_renderer = DjangoTemplates() custom = CustomRenderer() form = CustomForm(renderer=custom) self.assertEqual(form.renderer, custom)
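
# A usage sketch, not part of the original suite: the precedence that
# RendererTests exercises. An explicit renderer= argument wins over a
# default_renderer class attribute, which wins over the project-wide
# default. ExampleForm is a hypothetical form defined only for this
# illustration; Form, DjangoTemplates, CustomRenderer, and
# get_default_renderer are the names already used above.
def _renderer_precedence_sketch():
    class ExampleForm(Form):
        default_renderer = CustomRenderer()

    override = DjangoTemplates()
    assert ExampleForm(renderer=override).renderer is override  # kwarg wins
    assert isinstance(ExampleForm().renderer, CustomRenderer)  # class attribute
    assert Form().renderer == get_default_renderer()  # settings default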
import datetime import unittest from django.db import connection from django.db.models.fields import BooleanField, NullBooleanField from django.db.utils import DatabaseError from django.test import TransactionTestCase from ..models import NonIntegerAutoField, Square @unittest.skipUnless(connection.vendor == 'oracle', 'Oracle tests') class Tests(unittest.TestCase): def test_quote_name(self): """'%' chars are escaped for query execution.""" name = '"SOME%NAME"' quoted_name = connection.ops.quote_name(name) self.assertEqual(quoted_name % (), name) def test_dbms_session(self): """A stored procedure can be called through a cursor wrapper.""" with connection.cursor() as cursor: cursor.callproc('DBMS_SESSION.SET_IDENTIFIER', ['_django_testing!']) def test_cursor_var(self): """Cursor variables can be passed as query parameters.""" with connection.cursor() as cursor: var = cursor.var(str) cursor.execute("BEGIN %s := 'X'; END; ", [var]) self.assertEqual(var.getvalue(), 'X') def test_client_encoding(self): """Client encoding is set correctly.""" connection.ensure_connection() self.assertEqual(connection.connection.encoding, 'UTF-8') self.assertEqual(connection.connection.nencoding, 'UTF-8') def test_order_of_nls_parameters(self): """ An 'almost right' datetime works with configured NLS parameters (#18465). """ with connection.cursor() as cursor: query = "select 1 from dual where '1936-12-29 00:00' < sysdate" # The query succeeds without errors - pre #18465 this # wasn't the case. cursor.execute(query) self.assertEqual(cursor.fetchone()[0], 1) def test_boolean_constraints(self): """Boolean fields have check constraints on their values.""" for field in (BooleanField(), NullBooleanField(), BooleanField(null=True)): with self.subTest(field=field): field.set_attributes_from_name('is_nice') self.assertIn('"IS_NICE" IN (0,1)', field.db_check(connection)) @unittest.skipUnless(connection.vendor == 'oracle', 'Oracle tests') class TransactionalTests(TransactionTestCase): available_apps = ['backends'] def test_hidden_no_data_found_exception(self): # "ORA-1403: no data found" exception is hidden by Oracle OCI library # when an INSERT statement is used with a RETURNING clause (see #28859). with connection.cursor() as cursor: # Create trigger that raises "ORA-1403: no data found". cursor.execute(""" CREATE OR REPLACE TRIGGER "TRG_NO_DATA_FOUND" AFTER INSERT ON "BACKENDS_SQUARE" FOR EACH ROW BEGIN RAISE NO_DATA_FOUND; END; """) try: with self.assertRaisesMessage(DatabaseError, ( 'The database did not return a new row id. Probably "ORA-1403: ' 'no data found" was raised internally but was hidden by the ' 'Oracle OCI library (see https://code.djangoproject.com/ticket/28859).' )): Square.objects.create(root=2, square=4) finally: with connection.cursor() as cursor: cursor.execute('DROP TRIGGER "TRG_NO_DATA_FOUND"') def test_password_with_at_sign(self): old_password = connection.settings_dict['PASSWORD'] connection.settings_dict['PASSWORD'] = 'p@ssword' try: self.assertIn('/"p@ssword"@', connection._connect_string()) with self.assertRaises(DatabaseError) as context: connection.cursor() # Database exception: "ORA-01017: invalid username/password" is # expected. self.assertIn('ORA-01017', context.exception.args[0].message) finally: connection.settings_dict['PASSWORD'] = old_password def test_non_integer_auto_field(self): with connection.cursor() as cursor: # Create trigger that fill non-integer auto field. 
cursor.execute(""" CREATE OR REPLACE TRIGGER "TRG_FILL_CREATION_DATETIME" BEFORE INSERT ON "BACKENDS_NONINTEGERAUTOFIELD" FOR EACH ROW BEGIN :NEW.CREATION_DATETIME := SYSTIMESTAMP; END; """) try: NonIntegerAutoField._meta.auto_field = NonIntegerAutoField.creation_datetime obj = NonIntegerAutoField.objects.create() self.assertIsNotNone(obj.creation_datetime) self.assertIsInstance(obj.creation_datetime, datetime.datetime) finally: with connection.cursor() as cursor: cursor.execute('DROP TRIGGER "TRG_FILL_CREATION_DATETIME"')
from datetime import datetime, timedelta, timezone as datetime_timezone import pytz from django.conf import settings from django.db.models import ( DateField, DateTimeField, F, IntegerField, Max, OuterRef, Subquery, TimeField, ) from django.db.models.functions import ( Extract, ExtractDay, ExtractHour, ExtractIsoYear, ExtractMinute, ExtractMonth, ExtractQuarter, ExtractSecond, ExtractWeek, ExtractWeekDay, ExtractYear, Trunc, TruncDate, TruncDay, TruncHour, TruncMinute, TruncMonth, TruncQuarter, TruncSecond, TruncTime, TruncWeek, TruncYear, ) from django.test import ( TestCase, override_settings, skipIfDBFeature, skipUnlessDBFeature, ) from django.utils import timezone from ..models import Author, DTModel, Fan def truncate_to(value, kind, tzinfo=None): # Convert to target timezone before truncation if tzinfo is not None: value = value.astimezone(tzinfo) def truncate(value, kind): if kind == 'second': return value.replace(microsecond=0) if kind == 'minute': return value.replace(second=0, microsecond=0) if kind == 'hour': return value.replace(minute=0, second=0, microsecond=0) if kind == 'day': if isinstance(value, datetime): return value.replace(hour=0, minute=0, second=0, microsecond=0) return value if kind == 'week': if isinstance(value, datetime): return (value - timedelta(days=value.weekday())).replace(hour=0, minute=0, second=0, microsecond=0) return value - timedelta(days=value.weekday()) if kind == 'month': if isinstance(value, datetime): return value.replace(day=1, hour=0, minute=0, second=0, microsecond=0) return value.replace(day=1) if kind == 'quarter': month_in_quarter = value.month - (value.month - 1) % 3 if isinstance(value, datetime): return value.replace(month=month_in_quarter, day=1, hour=0, minute=0, second=0, microsecond=0) return value.replace(month=month_in_quarter, day=1) # otherwise, truncate to year if isinstance(value, datetime): return value.replace(month=1, day=1, hour=0, minute=0, second=0, microsecond=0) return value.replace(month=1, day=1) value = truncate(value, kind) if tzinfo is not None: # If there was a daylight saving transition, then reset the timezone. value = timezone.make_aware(value.replace(tzinfo=None), tzinfo) return value @override_settings(USE_TZ=False) class DateFunctionTests(TestCase): def create_model(self, start_datetime, end_datetime): return DTModel.objects.create( name=start_datetime.isoformat() if start_datetime else 'None', start_datetime=start_datetime, end_datetime=end_datetime, start_date=start_datetime.date() if start_datetime else None, end_date=end_datetime.date() if end_datetime else None, start_time=start_datetime.time() if start_datetime else None, end_time=end_datetime.time() if end_datetime else None, duration=(end_datetime - start_datetime) if start_datetime and end_datetime else None, ) def test_extract_year_exact_lookup(self): """ Extract year uses a BETWEEN filter to compare the year to allow indexes to be used. 
""" start_datetime = datetime(2015, 6, 15, 14, 10) end_datetime = datetime(2016, 6, 15, 14, 10) if settings.USE_TZ: start_datetime = timezone.make_aware(start_datetime, is_dst=False) end_datetime = timezone.make_aware(end_datetime, is_dst=False) self.create_model(start_datetime, end_datetime) self.create_model(end_datetime, start_datetime) for lookup in ('year', 'iso_year'): with self.subTest(lookup): qs = DTModel.objects.filter(**{'start_datetime__%s__exact' % lookup: 2015}) self.assertEqual(qs.count(), 1) query_string = str(qs.query).lower() self.assertEqual(query_string.count(' between '), 1) self.assertEqual(query_string.count('extract'), 0) # exact is implied and should be the same qs = DTModel.objects.filter(**{'start_datetime__%s' % lookup: 2015}) self.assertEqual(qs.count(), 1) query_string = str(qs.query).lower() self.assertEqual(query_string.count(' between '), 1) self.assertEqual(query_string.count('extract'), 0) # date and datetime fields should behave the same qs = DTModel.objects.filter(**{'start_date__%s' % lookup: 2015}) self.assertEqual(qs.count(), 1) query_string = str(qs.query).lower() self.assertEqual(query_string.count(' between '), 1) self.assertEqual(query_string.count('extract'), 0) # an expression rhs cannot use the between optimization. qs = DTModel.objects.annotate( start_year=ExtractYear('start_datetime'), ).filter(end_datetime__year=F('start_year') + 1) self.assertEqual(qs.count(), 1) query_string = str(qs.query).lower() self.assertEqual(query_string.count(' between '), 0) self.assertEqual(query_string.count('extract'), 3) def test_extract_year_greaterthan_lookup(self): start_datetime = datetime(2015, 6, 15, 14, 10) end_datetime = datetime(2016, 6, 15, 14, 10) if settings.USE_TZ: start_datetime = timezone.make_aware(start_datetime, is_dst=False) end_datetime = timezone.make_aware(end_datetime, is_dst=False) self.create_model(start_datetime, end_datetime) self.create_model(end_datetime, start_datetime) for lookup in ('year', 'iso_year'): with self.subTest(lookup): qs = DTModel.objects.filter(**{'start_datetime__%s__gt' % lookup: 2015}) self.assertEqual(qs.count(), 1) self.assertEqual(str(qs.query).lower().count('extract'), 0) qs = DTModel.objects.filter(**{'start_datetime__%s__gte' % lookup: 2015}) self.assertEqual(qs.count(), 2) self.assertEqual(str(qs.query).lower().count('extract'), 0) qs = DTModel.objects.annotate( start_year=ExtractYear('start_datetime'), ).filter(**{'end_datetime__%s__gte' % lookup: F('start_year')}) self.assertEqual(qs.count(), 1) self.assertGreaterEqual(str(qs.query).lower().count('extract'), 2) def test_extract_year_lessthan_lookup(self): start_datetime = datetime(2015, 6, 15, 14, 10) end_datetime = datetime(2016, 6, 15, 14, 10) if settings.USE_TZ: start_datetime = timezone.make_aware(start_datetime, is_dst=False) end_datetime = timezone.make_aware(end_datetime, is_dst=False) self.create_model(start_datetime, end_datetime) self.create_model(end_datetime, start_datetime) for lookup in ('year', 'iso_year'): with self.subTest(lookup): qs = DTModel.objects.filter(**{'start_datetime__%s__lt' % lookup: 2016}) self.assertEqual(qs.count(), 1) self.assertEqual(str(qs.query).count('extract'), 0) qs = DTModel.objects.filter(**{'start_datetime__%s__lte' % lookup: 2016}) self.assertEqual(qs.count(), 2) self.assertEqual(str(qs.query).count('extract'), 0) qs = DTModel.objects.annotate( end_year=ExtractYear('end_datetime'), ).filter(**{'start_datetime__%s__lte' % lookup: F('end_year')}) self.assertEqual(qs.count(), 1) 
self.assertGreaterEqual(str(qs.query).lower().count('extract'), 2) def test_extract_func(self): start_datetime = datetime(2015, 6, 15, 14, 30, 50, 321) end_datetime = datetime(2016, 6, 15, 14, 10, 50, 123) if settings.USE_TZ: start_datetime = timezone.make_aware(start_datetime, is_dst=False) end_datetime = timezone.make_aware(end_datetime, is_dst=False) self.create_model(start_datetime, end_datetime) self.create_model(end_datetime, start_datetime) with self.assertRaisesMessage(ValueError, 'lookup_name must be provided'): Extract('start_datetime') msg = 'Extract input expression must be DateField, DateTimeField, TimeField, or DurationField.' with self.assertRaisesMessage(ValueError, msg): list(DTModel.objects.annotate(extracted=Extract('name', 'hour'))) with self.assertRaisesMessage( ValueError, "Cannot extract time component 'second' from DateField 'start_date'."): list(DTModel.objects.annotate(extracted=Extract('start_date', 'second'))) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=Extract('start_datetime', 'year')).order_by('start_datetime'), [(start_datetime, start_datetime.year), (end_datetime, end_datetime.year)], lambda m: (m.start_datetime, m.extracted) ) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=Extract('start_datetime', 'quarter')).order_by('start_datetime'), [(start_datetime, 2), (end_datetime, 2)], lambda m: (m.start_datetime, m.extracted) ) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=Extract('start_datetime', 'month')).order_by('start_datetime'), [(start_datetime, start_datetime.month), (end_datetime, end_datetime.month)], lambda m: (m.start_datetime, m.extracted) ) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=Extract('start_datetime', 'day')).order_by('start_datetime'), [(start_datetime, start_datetime.day), (end_datetime, end_datetime.day)], lambda m: (m.start_datetime, m.extracted) ) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=Extract('start_datetime', 'week')).order_by('start_datetime'), [(start_datetime, 25), (end_datetime, 24)], lambda m: (m.start_datetime, m.extracted) ) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=Extract('start_datetime', 'week_day')).order_by('start_datetime'), [ (start_datetime, (start_datetime.isoweekday() % 7) + 1), (end_datetime, (end_datetime.isoweekday() % 7) + 1) ], lambda m: (m.start_datetime, m.extracted) ) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=Extract('start_datetime', 'hour')).order_by('start_datetime'), [(start_datetime, start_datetime.hour), (end_datetime, end_datetime.hour)], lambda m: (m.start_datetime, m.extracted) ) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=Extract('start_datetime', 'minute')).order_by('start_datetime'), [(start_datetime, start_datetime.minute), (end_datetime, end_datetime.minute)], lambda m: (m.start_datetime, m.extracted) ) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=Extract('start_datetime', 'second')).order_by('start_datetime'), [(start_datetime, start_datetime.second), (end_datetime, end_datetime.second)], lambda m: (m.start_datetime, m.extracted) ) self.assertEqual(DTModel.objects.filter(start_datetime__year=Extract('start_datetime', 'year')).count(), 2) self.assertEqual(DTModel.objects.filter(start_datetime__hour=Extract('start_datetime', 'hour')).count(), 2) self.assertEqual(DTModel.objects.filter(start_date__month=Extract('start_date', 'month')).count(), 2) self.assertEqual(DTModel.objects.filter(start_time__hour=Extract('start_time', 'hour')).count(), 
2) def test_extract_none(self): self.create_model(None, None) for t in (Extract('start_datetime', 'year'), Extract('start_date', 'year'), Extract('start_time', 'hour')): with self.subTest(t): self.assertIsNone(DTModel.objects.annotate(extracted=t).first().extracted) @skipUnlessDBFeature('has_native_duration_field') def test_extract_duration(self): start_datetime = datetime(2015, 6, 15, 14, 30, 50, 321) end_datetime = datetime(2016, 6, 15, 14, 10, 50, 123) if settings.USE_TZ: start_datetime = timezone.make_aware(start_datetime, is_dst=False) end_datetime = timezone.make_aware(end_datetime, is_dst=False) self.create_model(start_datetime, end_datetime) self.create_model(end_datetime, start_datetime) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=Extract('duration', 'second')).order_by('start_datetime'), [ (start_datetime, (end_datetime - start_datetime).seconds % 60), (end_datetime, (start_datetime - end_datetime).seconds % 60) ], lambda m: (m.start_datetime, m.extracted) ) self.assertEqual( DTModel.objects.annotate( duration_days=Extract('duration', 'day'), ).filter(duration_days__gt=200).count(), 1 ) @skipIfDBFeature('has_native_duration_field') def test_extract_duration_without_native_duration_field(self): msg = 'Extract requires native DurationField database support.' with self.assertRaisesMessage(ValueError, msg): list(DTModel.objects.annotate(extracted=Extract('duration', 'second'))) def test_extract_duration_unsupported_lookups(self): msg = "Cannot extract component '%s' from DurationField 'duration'." for lookup in ('year', 'iso_year', 'month', 'week', 'week_day', 'quarter'): with self.subTest(lookup): with self.assertRaisesMessage(ValueError, msg % lookup): DTModel.objects.annotate(extracted=Extract('duration', lookup)) def test_extract_year_func(self): start_datetime = datetime(2015, 6, 15, 14, 30, 50, 321) end_datetime = datetime(2016, 6, 15, 14, 10, 50, 123) if settings.USE_TZ: start_datetime = timezone.make_aware(start_datetime, is_dst=False) end_datetime = timezone.make_aware(end_datetime, is_dst=False) self.create_model(start_datetime, end_datetime) self.create_model(end_datetime, start_datetime) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=ExtractYear('start_datetime')).order_by('start_datetime'), [(start_datetime, start_datetime.year), (end_datetime, end_datetime.year)], lambda m: (m.start_datetime, m.extracted) ) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=ExtractYear('start_date')).order_by('start_datetime'), [(start_datetime, start_datetime.year), (end_datetime, end_datetime.year)], lambda m: (m.start_datetime, m.extracted) ) self.assertEqual(DTModel.objects.filter(start_datetime__year=ExtractYear('start_datetime')).count(), 2) def test_extract_iso_year_func(self): start_datetime = datetime(2015, 6, 15, 14, 30, 50, 321) end_datetime = datetime(2016, 6, 15, 14, 10, 50, 123) if settings.USE_TZ: start_datetime = timezone.make_aware(start_datetime, is_dst=False) end_datetime = timezone.make_aware(end_datetime, is_dst=False) self.create_model(start_datetime, end_datetime) self.create_model(end_datetime, start_datetime) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=ExtractIsoYear('start_datetime')).order_by('start_datetime'), [(start_datetime, start_datetime.year), (end_datetime, end_datetime.year)], lambda m: (m.start_datetime, m.extracted) ) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=ExtractIsoYear('start_date')).order_by('start_datetime'), [(start_datetime, start_datetime.year), (end_datetime, 
end_datetime.year)], lambda m: (m.start_datetime, m.extracted) ) # Both dates are from the same week year. self.assertEqual(DTModel.objects.filter(start_datetime__iso_year=ExtractIsoYear('start_datetime')).count(), 2) def test_extract_iso_year_func_boundaries(self): end_datetime = datetime(2016, 6, 15, 14, 10, 50, 123) if settings.USE_TZ: end_datetime = timezone.make_aware(end_datetime, is_dst=False) week_52_day_2014 = datetime(2014, 12, 27, 13, 0) # Sunday week_1_day_2014_2015 = datetime(2014, 12, 31, 13, 0) # Wednesday week_53_day_2015 = datetime(2015, 12, 31, 13, 0) # Thursday if settings.USE_TZ: week_1_day_2014_2015 = timezone.make_aware(week_1_day_2014_2015, is_dst=False) week_52_day_2014 = timezone.make_aware(week_52_day_2014, is_dst=False) week_53_day_2015 = timezone.make_aware(week_53_day_2015, is_dst=False) days = [week_52_day_2014, week_1_day_2014_2015, week_53_day_2015] self.create_model(week_53_day_2015, end_datetime) self.create_model(week_52_day_2014, end_datetime) self.create_model(week_1_day_2014_2015, end_datetime) qs = DTModel.objects.filter(start_datetime__in=days).annotate( extracted=ExtractIsoYear('start_datetime'), ).order_by('start_datetime') self.assertQuerysetEqual(qs, [ (week_52_day_2014, 2014), (week_1_day_2014_2015, 2015), (week_53_day_2015, 2015), ], lambda m: (m.start_datetime, m.extracted)) def test_extract_month_func(self): start_datetime = datetime(2015, 6, 15, 14, 30, 50, 321) end_datetime = datetime(2016, 6, 15, 14, 10, 50, 123) if settings.USE_TZ: start_datetime = timezone.make_aware(start_datetime, is_dst=False) end_datetime = timezone.make_aware(end_datetime, is_dst=False) self.create_model(start_datetime, end_datetime) self.create_model(end_datetime, start_datetime) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=ExtractMonth('start_datetime')).order_by('start_datetime'), [(start_datetime, start_datetime.month), (end_datetime, end_datetime.month)], lambda m: (m.start_datetime, m.extracted) ) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=ExtractMonth('start_date')).order_by('start_datetime'), [(start_datetime, start_datetime.month), (end_datetime, end_datetime.month)], lambda m: (m.start_datetime, m.extracted) ) self.assertEqual(DTModel.objects.filter(start_datetime__month=ExtractMonth('start_datetime')).count(), 2) def test_extract_day_func(self): start_datetime = datetime(2015, 6, 15, 14, 30, 50, 321) end_datetime = datetime(2016, 6, 15, 14, 10, 50, 123) if settings.USE_TZ: start_datetime = timezone.make_aware(start_datetime, is_dst=False) end_datetime = timezone.make_aware(end_datetime, is_dst=False) self.create_model(start_datetime, end_datetime) self.create_model(end_datetime, start_datetime) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=ExtractDay('start_datetime')).order_by('start_datetime'), [(start_datetime, start_datetime.day), (end_datetime, end_datetime.day)], lambda m: (m.start_datetime, m.extracted) ) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=ExtractDay('start_date')).order_by('start_datetime'), [(start_datetime, start_datetime.day), (end_datetime, end_datetime.day)], lambda m: (m.start_datetime, m.extracted) ) self.assertEqual(DTModel.objects.filter(start_datetime__day=ExtractDay('start_datetime')).count(), 2) def test_extract_week_func(self): start_datetime = datetime(2015, 6, 15, 14, 30, 50, 321) end_datetime = datetime(2016, 6, 15, 14, 10, 50, 123) if settings.USE_TZ: start_datetime = timezone.make_aware(start_datetime, is_dst=False) end_datetime = 
timezone.make_aware(end_datetime, is_dst=False) self.create_model(start_datetime, end_datetime) self.create_model(end_datetime, start_datetime) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=ExtractWeek('start_datetime')).order_by('start_datetime'), [(start_datetime, 25), (end_datetime, 24)], lambda m: (m.start_datetime, m.extracted) ) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=ExtractWeek('start_date')).order_by('start_datetime'), [(start_datetime, 25), (end_datetime, 24)], lambda m: (m.start_datetime, m.extracted) ) # both dates are from the same week. self.assertEqual(DTModel.objects.filter(start_datetime__week=ExtractWeek('start_datetime')).count(), 2) def test_extract_quarter_func(self): start_datetime = datetime(2015, 6, 15, 14, 30, 50, 321) end_datetime = datetime(2016, 8, 15, 14, 10, 50, 123) if settings.USE_TZ: start_datetime = timezone.make_aware(start_datetime, is_dst=False) end_datetime = timezone.make_aware(end_datetime, is_dst=False) self.create_model(start_datetime, end_datetime) self.create_model(end_datetime, start_datetime) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=ExtractQuarter('start_datetime')).order_by('start_datetime'), [(start_datetime, 2), (end_datetime, 3)], lambda m: (m.start_datetime, m.extracted) ) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=ExtractQuarter('start_date')).order_by('start_datetime'), [(start_datetime, 2), (end_datetime, 3)], lambda m: (m.start_datetime, m.extracted) ) self.assertEqual(DTModel.objects.filter(start_datetime__quarter=ExtractQuarter('start_datetime')).count(), 2) def test_extract_quarter_func_boundaries(self): end_datetime = datetime(2016, 6, 15, 14, 10, 50, 123) if settings.USE_TZ: end_datetime = timezone.make_aware(end_datetime, is_dst=False) last_quarter_2014 = datetime(2014, 12, 31, 13, 0) first_quarter_2015 = datetime(2015, 1, 1, 13, 0) if settings.USE_TZ: last_quarter_2014 = timezone.make_aware(last_quarter_2014, is_dst=False) first_quarter_2015 = timezone.make_aware(first_quarter_2015, is_dst=False) dates = [last_quarter_2014, first_quarter_2015] self.create_model(last_quarter_2014, end_datetime) self.create_model(first_quarter_2015, end_datetime) qs = DTModel.objects.filter(start_datetime__in=dates).annotate( extracted=ExtractQuarter('start_datetime'), ).order_by('start_datetime') self.assertQuerysetEqual(qs, [ (last_quarter_2014, 4), (first_quarter_2015, 1), ], lambda m: (m.start_datetime, m.extracted)) def test_extract_week_func_boundaries(self): end_datetime = datetime(2016, 6, 15, 14, 10, 50, 123) if settings.USE_TZ: end_datetime = timezone.make_aware(end_datetime, is_dst=False) week_52_day_2014 = datetime(2014, 12, 27, 13, 0) # Sunday week_1_day_2014_2015 = datetime(2014, 12, 31, 13, 0) # Wednesday week_53_day_2015 = datetime(2015, 12, 31, 13, 0) # Thursday if settings.USE_TZ: week_1_day_2014_2015 = timezone.make_aware(week_1_day_2014_2015, is_dst=False) week_52_day_2014 = timezone.make_aware(week_52_day_2014, is_dst=False) week_53_day_2015 = timezone.make_aware(week_53_day_2015, is_dst=False) days = [week_52_day_2014, week_1_day_2014_2015, week_53_day_2015] self.create_model(week_53_day_2015, end_datetime) self.create_model(week_52_day_2014, end_datetime) self.create_model(week_1_day_2014_2015, end_datetime) qs = DTModel.objects.filter(start_datetime__in=days).annotate( extracted=ExtractWeek('start_datetime'), ).order_by('start_datetime') self.assertQuerysetEqual(qs, [ (week_52_day_2014, 52), (week_1_day_2014_2015, 1), (week_53_day_2015, 53), ], lambda 
m: (m.start_datetime, m.extracted)) def test_extract_weekday_func(self): start_datetime = datetime(2015, 6, 15, 14, 30, 50, 321) end_datetime = datetime(2016, 6, 15, 14, 10, 50, 123) if settings.USE_TZ: start_datetime = timezone.make_aware(start_datetime, is_dst=False) end_datetime = timezone.make_aware(end_datetime, is_dst=False) self.create_model(start_datetime, end_datetime) self.create_model(end_datetime, start_datetime) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=ExtractWeekDay('start_datetime')).order_by('start_datetime'), [ (start_datetime, (start_datetime.isoweekday() % 7) + 1), (end_datetime, (end_datetime.isoweekday() % 7) + 1), ], lambda m: (m.start_datetime, m.extracted) ) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=ExtractWeekDay('start_date')).order_by('start_datetime'), [ (start_datetime, (start_datetime.isoweekday() % 7) + 1), (end_datetime, (end_datetime.isoweekday() % 7) + 1), ], lambda m: (m.start_datetime, m.extracted) ) self.assertEqual(DTModel.objects.filter(start_datetime__week_day=ExtractWeekDay('start_datetime')).count(), 2) def test_extract_hour_func(self): start_datetime = datetime(2015, 6, 15, 14, 30, 50, 321) end_datetime = datetime(2016, 6, 15, 14, 10, 50, 123) if settings.USE_TZ: start_datetime = timezone.make_aware(start_datetime, is_dst=False) end_datetime = timezone.make_aware(end_datetime, is_dst=False) self.create_model(start_datetime, end_datetime) self.create_model(end_datetime, start_datetime) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=ExtractHour('start_datetime')).order_by('start_datetime'), [(start_datetime, start_datetime.hour), (end_datetime, end_datetime.hour)], lambda m: (m.start_datetime, m.extracted) ) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=ExtractHour('start_time')).order_by('start_datetime'), [(start_datetime, start_datetime.hour), (end_datetime, end_datetime.hour)], lambda m: (m.start_datetime, m.extracted) ) self.assertEqual(DTModel.objects.filter(start_datetime__hour=ExtractHour('start_datetime')).count(), 2) def test_extract_minute_func(self): start_datetime = datetime(2015, 6, 15, 14, 30, 50, 321) end_datetime = datetime(2016, 6, 15, 14, 10, 50, 123) if settings.USE_TZ: start_datetime = timezone.make_aware(start_datetime, is_dst=False) end_datetime = timezone.make_aware(end_datetime, is_dst=False) self.create_model(start_datetime, end_datetime) self.create_model(end_datetime, start_datetime) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=ExtractMinute('start_datetime')).order_by('start_datetime'), [(start_datetime, start_datetime.minute), (end_datetime, end_datetime.minute)], lambda m: (m.start_datetime, m.extracted) ) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=ExtractMinute('start_time')).order_by('start_datetime'), [(start_datetime, start_datetime.minute), (end_datetime, end_datetime.minute)], lambda m: (m.start_datetime, m.extracted) ) self.assertEqual(DTModel.objects.filter(start_datetime__minute=ExtractMinute('start_datetime')).count(), 2) def test_extract_second_func(self): start_datetime = datetime(2015, 6, 15, 14, 30, 50, 321) end_datetime = datetime(2016, 6, 15, 14, 10, 50, 123) if settings.USE_TZ: start_datetime = timezone.make_aware(start_datetime, is_dst=False) end_datetime = timezone.make_aware(end_datetime, is_dst=False) self.create_model(start_datetime, end_datetime) self.create_model(end_datetime, start_datetime) self.assertQuerysetEqual( 
DTModel.objects.annotate(extracted=ExtractSecond('start_datetime')).order_by('start_datetime'), [(start_datetime, start_datetime.second), (end_datetime, end_datetime.second)], lambda m: (m.start_datetime, m.extracted) ) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=ExtractSecond('start_time')).order_by('start_datetime'), [(start_datetime, start_datetime.second), (end_datetime, end_datetime.second)], lambda m: (m.start_datetime, m.extracted) ) self.assertEqual(DTModel.objects.filter(start_datetime__second=ExtractSecond('start_datetime')).count(), 2) def test_trunc_func(self): start_datetime = datetime(2015, 6, 15, 14, 30, 50, 321) end_datetime = datetime(2016, 6, 15, 14, 10, 50, 123) if settings.USE_TZ: start_datetime = timezone.make_aware(start_datetime, is_dst=False) end_datetime = timezone.make_aware(end_datetime, is_dst=False) self.create_model(start_datetime, end_datetime) self.create_model(end_datetime, start_datetime) msg = 'output_field must be either DateField, TimeField, or DateTimeField' with self.assertRaisesMessage(ValueError, msg): list(DTModel.objects.annotate(truncated=Trunc('start_datetime', 'year', output_field=IntegerField()))) with self.assertRaisesMessage(AssertionError, "'name' isn't a DateField, TimeField, or DateTimeField."): list(DTModel.objects.annotate(truncated=Trunc('name', 'year', output_field=DateTimeField()))) with self.assertRaisesMessage(ValueError, "Cannot truncate DateField 'start_date' to DateTimeField"): list(DTModel.objects.annotate(truncated=Trunc('start_date', 'second'))) with self.assertRaisesMessage(ValueError, "Cannot truncate TimeField 'start_time' to DateTimeField"): list(DTModel.objects.annotate(truncated=Trunc('start_time', 'month'))) with self.assertRaisesMessage(ValueError, "Cannot truncate DateField 'start_date' to DateTimeField"): list(DTModel.objects.annotate(truncated=Trunc('start_date', 'month', output_field=DateTimeField()))) with self.assertRaisesMessage(ValueError, "Cannot truncate TimeField 'start_time' to DateTimeField"): list(DTModel.objects.annotate(truncated=Trunc('start_time', 'second', output_field=DateTimeField()))) def test_datetime_kind(kind): self.assertQuerysetEqual( DTModel.objects.annotate( truncated=Trunc('start_datetime', kind, output_field=DateTimeField()) ).order_by('start_datetime'), [ (start_datetime, truncate_to(start_datetime, kind)), (end_datetime, truncate_to(end_datetime, kind)) ], lambda m: (m.start_datetime, m.truncated) ) def test_date_kind(kind): self.assertQuerysetEqual( DTModel.objects.annotate( truncated=Trunc('start_date', kind, output_field=DateField()) ).order_by('start_datetime'), [ (start_datetime, truncate_to(start_datetime.date(), kind)), (end_datetime, truncate_to(end_datetime.date(), kind)) ], lambda m: (m.start_datetime, m.truncated) ) def test_time_kind(kind): self.assertQuerysetEqual( DTModel.objects.annotate( truncated=Trunc('start_time', kind, output_field=TimeField()) ).order_by('start_datetime'), [ (start_datetime, truncate_to(start_datetime.time(), kind)), (end_datetime, truncate_to(end_datetime.time(), kind)) ], lambda m: (m.start_datetime, m.truncated) ) test_date_kind('year') test_date_kind('quarter') test_date_kind('month') test_date_kind('week') test_date_kind('day') test_time_kind('hour') test_time_kind('minute') test_time_kind('second') test_datetime_kind('year') test_datetime_kind('quarter') test_datetime_kind('month') test_datetime_kind('week') test_datetime_kind('day') test_datetime_kind('hour') test_datetime_kind('minute') test_datetime_kind('second') qs = 
DTModel.objects.filter(start_datetime__date=Trunc('start_datetime', 'day', output_field=DateField())) self.assertEqual(qs.count(), 2) def test_trunc_none(self): self.create_model(None, None) for t in (Trunc('start_datetime', 'year'), Trunc('start_date', 'year'), Trunc('start_time', 'hour')): with self.subTest(t): self.assertIsNone(DTModel.objects.annotate(truncated=t).first().truncated) def test_trunc_year_func(self): start_datetime = datetime(2015, 6, 15, 14, 30, 50, 321) end_datetime = truncate_to(datetime(2016, 6, 15, 14, 10, 50, 123), 'year') if settings.USE_TZ: start_datetime = timezone.make_aware(start_datetime, is_dst=False) end_datetime = timezone.make_aware(end_datetime, is_dst=False) self.create_model(start_datetime, end_datetime) self.create_model(end_datetime, start_datetime) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=TruncYear('start_datetime')).order_by('start_datetime'), [ (start_datetime, truncate_to(start_datetime, 'year')), (end_datetime, truncate_to(end_datetime, 'year')), ], lambda m: (m.start_datetime, m.extracted) ) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=TruncYear('start_date')).order_by('start_datetime'), [ (start_datetime, truncate_to(start_datetime.date(), 'year')), (end_datetime, truncate_to(end_datetime.date(), 'year')), ], lambda m: (m.start_datetime, m.extracted) ) self.assertEqual(DTModel.objects.filter(start_datetime=TruncYear('start_datetime')).count(), 1) with self.assertRaisesMessage(ValueError, "Cannot truncate TimeField 'start_time' to DateTimeField"): list(DTModel.objects.annotate(truncated=TruncYear('start_time'))) with self.assertRaisesMessage(ValueError, "Cannot truncate TimeField 'start_time' to DateTimeField"): list(DTModel.objects.annotate(truncated=TruncYear('start_time', output_field=TimeField()))) def test_trunc_quarter_func(self): start_datetime = datetime(2015, 6, 15, 14, 30, 50, 321) end_datetime = truncate_to(datetime(2016, 10, 15, 14, 10, 50, 123), 'quarter') last_quarter_2015 = truncate_to(datetime(2015, 12, 31, 14, 10, 50, 123), 'quarter') first_quarter_2016 = truncate_to(datetime(2016, 1, 1, 14, 10, 50, 123), 'quarter') if settings.USE_TZ: start_datetime = timezone.make_aware(start_datetime, is_dst=False) end_datetime = timezone.make_aware(end_datetime, is_dst=False) last_quarter_2015 = timezone.make_aware(last_quarter_2015, is_dst=False) first_quarter_2016 = timezone.make_aware(first_quarter_2016, is_dst=False) self.create_model(start_datetime=start_datetime, end_datetime=end_datetime) self.create_model(start_datetime=end_datetime, end_datetime=start_datetime) self.create_model(start_datetime=last_quarter_2015, end_datetime=end_datetime) self.create_model(start_datetime=first_quarter_2016, end_datetime=end_datetime) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=TruncQuarter('start_date')).order_by('start_datetime'), [ (start_datetime, truncate_to(start_datetime.date(), 'quarter')), (last_quarter_2015, truncate_to(last_quarter_2015.date(), 'quarter')), (first_quarter_2016, truncate_to(first_quarter_2016.date(), 'quarter')), (end_datetime, truncate_to(end_datetime.date(), 'quarter')), ], lambda m: (m.start_datetime, m.extracted) ) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=TruncQuarter('start_datetime')).order_by('start_datetime'), [ (start_datetime, truncate_to(start_datetime, 'quarter')), (last_quarter_2015, truncate_to(last_quarter_2015, 'quarter')), (first_quarter_2016, truncate_to(first_quarter_2016, 'quarter')), (end_datetime, truncate_to(end_datetime, 
'quarter')), ], lambda m: (m.start_datetime, m.extracted) ) with self.assertRaisesMessage(ValueError, "Cannot truncate TimeField 'start_time' to DateTimeField"): list(DTModel.objects.annotate(truncated=TruncQuarter('start_time'))) with self.assertRaisesMessage(ValueError, "Cannot truncate TimeField 'start_time' to DateTimeField"): list(DTModel.objects.annotate(truncated=TruncQuarter('start_time', output_field=TimeField()))) def test_trunc_month_func(self): start_datetime = datetime(2015, 6, 15, 14, 30, 50, 321) end_datetime = truncate_to(datetime(2016, 6, 15, 14, 10, 50, 123), 'month') if settings.USE_TZ: start_datetime = timezone.make_aware(start_datetime, is_dst=False) end_datetime = timezone.make_aware(end_datetime, is_dst=False) self.create_model(start_datetime, end_datetime) self.create_model(end_datetime, start_datetime) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=TruncMonth('start_datetime')).order_by('start_datetime'), [ (start_datetime, truncate_to(start_datetime, 'month')), (end_datetime, truncate_to(end_datetime, 'month')), ], lambda m: (m.start_datetime, m.extracted) ) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=TruncMonth('start_date')).order_by('start_datetime'), [ (start_datetime, truncate_to(start_datetime.date(), 'month')), (end_datetime, truncate_to(end_datetime.date(), 'month')), ], lambda m: (m.start_datetime, m.extracted) ) self.assertEqual(DTModel.objects.filter(start_datetime=TruncMonth('start_datetime')).count(), 1) with self.assertRaisesMessage(ValueError, "Cannot truncate TimeField 'start_time' to DateTimeField"): list(DTModel.objects.annotate(truncated=TruncMonth('start_time'))) with self.assertRaisesMessage(ValueError, "Cannot truncate TimeField 'start_time' to DateTimeField"): list(DTModel.objects.annotate(truncated=TruncMonth('start_time', output_field=TimeField()))) def test_trunc_week_func(self): start_datetime = datetime(2015, 6, 15, 14, 30, 50, 321) end_datetime = truncate_to(datetime(2016, 6, 15, 14, 10, 50, 123), 'week') if settings.USE_TZ: start_datetime = timezone.make_aware(start_datetime, is_dst=False) end_datetime = timezone.make_aware(end_datetime, is_dst=False) self.create_model(start_datetime, end_datetime) self.create_model(end_datetime, start_datetime) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=TruncWeek('start_datetime')).order_by('start_datetime'), [ (start_datetime, truncate_to(start_datetime, 'week')), (end_datetime, truncate_to(end_datetime, 'week')), ], lambda m: (m.start_datetime, m.extracted) ) self.assertEqual(DTModel.objects.filter(start_datetime=TruncWeek('start_datetime')).count(), 1) with self.assertRaisesMessage(ValueError, "Cannot truncate TimeField 'start_time' to DateTimeField"): list(DTModel.objects.annotate(truncated=TruncWeek('start_time'))) with self.assertRaisesMessage(ValueError, "Cannot truncate TimeField 'start_time' to DateTimeField"): list(DTModel.objects.annotate(truncated=TruncWeek('start_time', output_field=TimeField()))) def test_trunc_date_func(self): start_datetime = datetime(2015, 6, 15, 14, 30, 50, 321) end_datetime = datetime(2016, 6, 15, 14, 10, 50, 123) if settings.USE_TZ: start_datetime = timezone.make_aware(start_datetime, is_dst=False) end_datetime = timezone.make_aware(end_datetime, is_dst=False) self.create_model(start_datetime, end_datetime) self.create_model(end_datetime, start_datetime) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=TruncDate('start_datetime')).order_by('start_datetime'), [ (start_datetime, start_datetime.date()), 
(end_datetime, end_datetime.date()), ], lambda m: (m.start_datetime, m.extracted) ) self.assertEqual(DTModel.objects.filter(start_datetime__date=TruncDate('start_datetime')).count(), 2) with self.assertRaisesMessage(ValueError, "Cannot truncate TimeField 'start_time' to DateField"): list(DTModel.objects.annotate(truncated=TruncDate('start_time'))) with self.assertRaisesMessage(ValueError, "Cannot truncate TimeField 'start_time' to DateField"): list(DTModel.objects.annotate(truncated=TruncDate('start_time', output_field=TimeField()))) def test_trunc_date_none(self): self.create_model(None, None) self.assertIsNone(DTModel.objects.annotate(truncated=TruncDate('start_datetime')).first().truncated) def test_trunc_time_func(self): start_datetime = datetime(2015, 6, 15, 14, 30, 50, 321) end_datetime = datetime(2016, 6, 15, 14, 10, 50, 123) if settings.USE_TZ: start_datetime = timezone.make_aware(start_datetime, is_dst=False) end_datetime = timezone.make_aware(end_datetime, is_dst=False) self.create_model(start_datetime, end_datetime) self.create_model(end_datetime, start_datetime) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=TruncTime('start_datetime')).order_by('start_datetime'), [ (start_datetime, start_datetime.time()), (end_datetime, end_datetime.time()), ], lambda m: (m.start_datetime, m.extracted) ) self.assertEqual(DTModel.objects.filter(start_datetime__time=TruncTime('start_datetime')).count(), 2) with self.assertRaisesMessage(ValueError, "Cannot truncate DateField 'start_date' to TimeField"): list(DTModel.objects.annotate(truncated=TruncTime('start_date'))) with self.assertRaisesMessage(ValueError, "Cannot truncate DateField 'start_date' to TimeField"): list(DTModel.objects.annotate(truncated=TruncTime('start_date', output_field=DateField()))) def test_trunc_time_none(self): self.create_model(None, None) self.assertIsNone(DTModel.objects.annotate(truncated=TruncTime('start_datetime')).first().truncated) def test_trunc_day_func(self): start_datetime = datetime(2015, 6, 15, 14, 30, 50, 321) end_datetime = truncate_to(datetime(2016, 6, 15, 14, 10, 50, 123), 'day') if settings.USE_TZ: start_datetime = timezone.make_aware(start_datetime, is_dst=False) end_datetime = timezone.make_aware(end_datetime, is_dst=False) self.create_model(start_datetime, end_datetime) self.create_model(end_datetime, start_datetime) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=TruncDay('start_datetime')).order_by('start_datetime'), [ (start_datetime, truncate_to(start_datetime, 'day')), (end_datetime, truncate_to(end_datetime, 'day')), ], lambda m: (m.start_datetime, m.extracted) ) self.assertEqual(DTModel.objects.filter(start_datetime=TruncDay('start_datetime')).count(), 1) with self.assertRaisesMessage(ValueError, "Cannot truncate TimeField 'start_time' to DateTimeField"): list(DTModel.objects.annotate(truncated=TruncDay('start_time'))) with self.assertRaisesMessage(ValueError, "Cannot truncate TimeField 'start_time' to DateTimeField"): list(DTModel.objects.annotate(truncated=TruncDay('start_time', output_field=TimeField()))) def test_trunc_hour_func(self): start_datetime = datetime(2015, 6, 15, 14, 30, 50, 321) end_datetime = truncate_to(datetime(2016, 6, 15, 14, 10, 50, 123), 'hour') if settings.USE_TZ: start_datetime = timezone.make_aware(start_datetime, is_dst=False) end_datetime = timezone.make_aware(end_datetime, is_dst=False) self.create_model(start_datetime, end_datetime) self.create_model(end_datetime, start_datetime) self.assertQuerysetEqual( 
DTModel.objects.annotate(extracted=TruncHour('start_datetime')).order_by('start_datetime'), [ (start_datetime, truncate_to(start_datetime, 'hour')), (end_datetime, truncate_to(end_datetime, 'hour')), ], lambda m: (m.start_datetime, m.extracted) ) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=TruncHour('start_time')).order_by('start_datetime'), [ (start_datetime, truncate_to(start_datetime.time(), 'hour')), (end_datetime, truncate_to(end_datetime.time(), 'hour')), ], lambda m: (m.start_datetime, m.extracted) ) self.assertEqual(DTModel.objects.filter(start_datetime=TruncHour('start_datetime')).count(), 1) with self.assertRaisesMessage(ValueError, "Cannot truncate DateField 'start_date' to DateTimeField"): list(DTModel.objects.annotate(truncated=TruncHour('start_date'))) with self.assertRaisesMessage(ValueError, "Cannot truncate DateField 'start_date' to DateTimeField"): list(DTModel.objects.annotate(truncated=TruncHour('start_date', output_field=DateField()))) def test_trunc_minute_func(self): start_datetime = datetime(2015, 6, 15, 14, 30, 50, 321) end_datetime = truncate_to(datetime(2016, 6, 15, 14, 10, 50, 123), 'minute') if settings.USE_TZ: start_datetime = timezone.make_aware(start_datetime, is_dst=False) end_datetime = timezone.make_aware(end_datetime, is_dst=False) self.create_model(start_datetime, end_datetime) self.create_model(end_datetime, start_datetime) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=TruncMinute('start_datetime')).order_by('start_datetime'), [ (start_datetime, truncate_to(start_datetime, 'minute')), (end_datetime, truncate_to(end_datetime, 'minute')), ], lambda m: (m.start_datetime, m.extracted) ) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=TruncMinute('start_time')).order_by('start_datetime'), [ (start_datetime, truncate_to(start_datetime.time(), 'minute')), (end_datetime, truncate_to(end_datetime.time(), 'minute')), ], lambda m: (m.start_datetime, m.extracted) ) self.assertEqual(DTModel.objects.filter(start_datetime=TruncMinute('start_datetime')).count(), 1) with self.assertRaisesMessage(ValueError, "Cannot truncate DateField 'start_date' to DateTimeField"): list(DTModel.objects.annotate(truncated=TruncMinute('start_date'))) with self.assertRaisesMessage(ValueError, "Cannot truncate DateField 'start_date' to DateTimeField"): list(DTModel.objects.annotate(truncated=TruncMinute('start_date', output_field=DateField()))) def test_trunc_second_func(self): start_datetime = datetime(2015, 6, 15, 14, 30, 50, 321) end_datetime = truncate_to(datetime(2016, 6, 15, 14, 10, 50, 123), 'second') if settings.USE_TZ: start_datetime = timezone.make_aware(start_datetime, is_dst=False) end_datetime = timezone.make_aware(end_datetime, is_dst=False) self.create_model(start_datetime, end_datetime) self.create_model(end_datetime, start_datetime) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=TruncSecond('start_datetime')).order_by('start_datetime'), [ (start_datetime, truncate_to(start_datetime, 'second')), (end_datetime, truncate_to(end_datetime, 'second')) ], lambda m: (m.start_datetime, m.extracted) ) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=TruncSecond('start_time')).order_by('start_datetime'), [ (start_datetime, truncate_to(start_datetime.time(), 'second')), (end_datetime, truncate_to(end_datetime.time(), 'second')) ], lambda m: (m.start_datetime, m.extracted) ) self.assertEqual(DTModel.objects.filter(start_datetime=TruncSecond('start_datetime')).count(), 1) with self.assertRaisesMessage(ValueError, 
"Cannot truncate DateField 'start_date' to DateTimeField"): list(DTModel.objects.annotate(truncated=TruncSecond('start_date'))) with self.assertRaisesMessage(ValueError, "Cannot truncate DateField 'start_date' to DateTimeField"): list(DTModel.objects.annotate(truncated=TruncSecond('start_date', output_field=DateField()))) def test_trunc_subquery_with_parameters(self): author_1 = Author.objects.create(name='J. R. R. Tolkien') author_2 = Author.objects.create(name='G. R. R. Martin') fan_since_1 = datetime(2016, 2, 3, 15, 0, 0) fan_since_2 = datetime(2015, 2, 3, 15, 0, 0) fan_since_3 = datetime(2017, 2, 3, 15, 0, 0) if settings.USE_TZ: fan_since_1 = timezone.make_aware(fan_since_1, is_dst=False) fan_since_2 = timezone.make_aware(fan_since_2, is_dst=False) fan_since_3 = timezone.make_aware(fan_since_3, is_dst=False) Fan.objects.create(author=author_1, name='Tom', fan_since=fan_since_1) Fan.objects.create(author=author_1, name='Emma', fan_since=fan_since_2) Fan.objects.create(author=author_2, name='Isabella', fan_since=fan_since_3) inner = Fan.objects.filter( author=OuterRef('pk'), name__in=('Emma', 'Isabella', 'Tom') ).values('author').annotate(newest_fan=Max('fan_since')).values('newest_fan') outer = Author.objects.annotate( newest_fan_year=TruncYear(Subquery(inner, output_field=DateTimeField())) ) tz = pytz.UTC if settings.USE_TZ else None self.assertSequenceEqual( outer.order_by('name').values('name', 'newest_fan_year'), [ {'name': 'G. R. R. Martin', 'newest_fan_year': datetime(2017, 1, 1, 0, 0, tzinfo=tz)}, {'name': 'J. R. R. Tolkien', 'newest_fan_year': datetime(2016, 1, 1, 0, 0, tzinfo=tz)}, ] ) @override_settings(USE_TZ=True, TIME_ZONE='UTC') class DateFunctionWithTimeZoneTests(DateFunctionTests): def test_extract_func_with_timezone(self): start_datetime = datetime(2015, 6, 15, 23, 30, 1, 321) end_datetime = datetime(2015, 6, 16, 13, 11, 27, 123) start_datetime = timezone.make_aware(start_datetime, is_dst=False) end_datetime = timezone.make_aware(end_datetime, is_dst=False) self.create_model(start_datetime, end_datetime) melb = pytz.timezone('Australia/Melbourne') delta_tzinfo_pos = datetime_timezone(timedelta(hours=5)) delta_tzinfo_neg = datetime_timezone(timedelta(hours=-5, minutes=17)) qs = DTModel.objects.annotate( day=Extract('start_datetime', 'day'), day_melb=Extract('start_datetime', 'day', tzinfo=melb), week=Extract('start_datetime', 'week', tzinfo=melb), isoyear=ExtractIsoYear('start_datetime', tzinfo=melb), weekday=ExtractWeekDay('start_datetime'), weekday_melb=ExtractWeekDay('start_datetime', tzinfo=melb), quarter=ExtractQuarter('start_datetime', tzinfo=melb), hour=ExtractHour('start_datetime'), hour_melb=ExtractHour('start_datetime', tzinfo=melb), hour_with_delta_pos=ExtractHour('start_datetime', tzinfo=delta_tzinfo_pos), hour_with_delta_neg=ExtractHour('start_datetime', tzinfo=delta_tzinfo_neg), minute_with_delta_neg=ExtractMinute('start_datetime', tzinfo=delta_tzinfo_neg), ).order_by('start_datetime') utc_model = qs.get() self.assertEqual(utc_model.day, 15) self.assertEqual(utc_model.day_melb, 16) self.assertEqual(utc_model.week, 25) self.assertEqual(utc_model.isoyear, 2015) self.assertEqual(utc_model.weekday, 2) self.assertEqual(utc_model.weekday_melb, 3) self.assertEqual(utc_model.quarter, 2) self.assertEqual(utc_model.hour, 23) self.assertEqual(utc_model.hour_melb, 9) self.assertEqual(utc_model.hour_with_delta_pos, 4) self.assertEqual(utc_model.hour_with_delta_neg, 18) self.assertEqual(utc_model.minute_with_delta_neg, 47) with timezone.override(melb): melb_model = 
qs.get() self.assertEqual(melb_model.day, 16) self.assertEqual(melb_model.day_melb, 16) self.assertEqual(melb_model.week, 25) self.assertEqual(melb_model.isoyear, 2015) self.assertEqual(melb_model.weekday, 3) self.assertEqual(melb_model.quarter, 2) self.assertEqual(melb_model.weekday_melb, 3) self.assertEqual(melb_model.hour, 9) self.assertEqual(melb_model.hour_melb, 9) def test_extract_func_explicit_timezone_priority(self): start_datetime = datetime(2015, 6, 15, 23, 30, 1, 321) end_datetime = datetime(2015, 6, 16, 13, 11, 27, 123) start_datetime = timezone.make_aware(start_datetime, is_dst=False) end_datetime = timezone.make_aware(end_datetime, is_dst=False) self.create_model(start_datetime, end_datetime) melb = pytz.timezone('Australia/Melbourne') with timezone.override(melb): model = DTModel.objects.annotate( day_melb=Extract('start_datetime', 'day'), day_utc=Extract('start_datetime', 'day', tzinfo=timezone.utc), ).order_by('start_datetime').get() self.assertEqual(model.day_melb, 16) self.assertEqual(model.day_utc, 15) def test_trunc_timezone_applied_before_truncation(self): start_datetime = datetime(2016, 1, 1, 1, 30, 50, 321) end_datetime = datetime(2016, 6, 15, 14, 10, 50, 123) start_datetime = timezone.make_aware(start_datetime, is_dst=False) end_datetime = timezone.make_aware(end_datetime, is_dst=False) self.create_model(start_datetime, end_datetime) melb = pytz.timezone('Australia/Melbourne') pacific = pytz.timezone('US/Pacific') model = DTModel.objects.annotate( melb_year=TruncYear('start_datetime', tzinfo=melb), pacific_year=TruncYear('start_datetime', tzinfo=pacific), ).order_by('start_datetime').get() self.assertEqual(model.start_datetime, start_datetime) self.assertEqual(model.melb_year, truncate_to(start_datetime, 'year', melb)) self.assertEqual(model.pacific_year, truncate_to(start_datetime, 'year', pacific)) self.assertEqual(model.start_datetime.year, 2016) self.assertEqual(model.melb_year.year, 2016) self.assertEqual(model.pacific_year.year, 2015) def test_trunc_ambiguous_and_invalid_times(self): sao = pytz.timezone('America/Sao_Paulo') utc = pytz.timezone('UTC') start_datetime = utc.localize(datetime(2016, 10, 16, 13)) end_datetime = utc.localize(datetime(2016, 2, 21, 1)) self.create_model(start_datetime, end_datetime) with timezone.override(sao): with self.assertRaisesMessage(pytz.NonExistentTimeError, '2016-10-16 00:00:00'): model = DTModel.objects.annotate(truncated_start=TruncDay('start_datetime')).get() with self.assertRaisesMessage(pytz.AmbiguousTimeError, '2016-02-20 23:00:00'): model = DTModel.objects.annotate(truncated_end=TruncHour('end_datetime')).get() model = DTModel.objects.annotate( truncated_start=TruncDay('start_datetime', is_dst=False), truncated_end=TruncHour('end_datetime', is_dst=False), ).get() self.assertEqual(model.truncated_start.dst(), timedelta(0)) self.assertEqual(model.truncated_end.dst(), timedelta(0)) model = DTModel.objects.annotate( truncated_start=TruncDay('start_datetime', is_dst=True), truncated_end=TruncHour('end_datetime', is_dst=True), ).get() self.assertEqual(model.truncated_start.dst(), timedelta(0, 3600)) self.assertEqual(model.truncated_end.dst(), timedelta(0, 3600)) def test_trunc_func_with_timezone(self): """ If the truncated datetime transitions to a different offset (daylight saving) then the returned value will have that new timezone/offset. 
""" start_datetime = datetime(2015, 6, 15, 14, 30, 50, 321) end_datetime = datetime(2016, 6, 15, 14, 10, 50, 123) start_datetime = timezone.make_aware(start_datetime, is_dst=False) end_datetime = timezone.make_aware(end_datetime, is_dst=False) self.create_model(start_datetime, end_datetime) self.create_model(end_datetime, start_datetime) melb = pytz.timezone('Australia/Melbourne') def test_datetime_kind(kind): self.assertQuerysetEqual( DTModel.objects.annotate( truncated=Trunc('start_datetime', kind, output_field=DateTimeField(), tzinfo=melb) ).order_by('start_datetime'), [ (start_datetime, truncate_to(start_datetime.astimezone(melb), kind, melb)), (end_datetime, truncate_to(end_datetime.astimezone(melb), kind, melb)) ], lambda m: (m.start_datetime, m.truncated) ) def test_date_kind(kind): self.assertQuerysetEqual( DTModel.objects.annotate( truncated=Trunc('start_date', kind, output_field=DateField(), tzinfo=melb) ).order_by('start_datetime'), [ (start_datetime, truncate_to(start_datetime.date(), kind)), (end_datetime, truncate_to(end_datetime.date(), kind)) ], lambda m: (m.start_datetime, m.truncated) ) def test_time_kind(kind): self.assertQuerysetEqual( DTModel.objects.annotate( truncated=Trunc('start_time', kind, output_field=TimeField(), tzinfo=melb) ).order_by('start_datetime'), [ (start_datetime, truncate_to(start_datetime.time(), kind)), (end_datetime, truncate_to(end_datetime.time(), kind)) ], lambda m: (m.start_datetime, m.truncated) ) test_date_kind('year') test_date_kind('quarter') test_date_kind('month') test_date_kind('week') test_date_kind('day') test_time_kind('hour') test_time_kind('minute') test_time_kind('second') test_datetime_kind('year') test_datetime_kind('quarter') test_datetime_kind('month') test_datetime_kind('week') test_datetime_kind('day') test_datetime_kind('hour') test_datetime_kind('minute') test_datetime_kind('second') qs = DTModel.objects.filter(start_datetime__date=Trunc('start_datetime', 'day', output_field=DateField())) self.assertEqual(qs.count(), 2)
15e6481dd1d886d4211752df0cbdd04dd77a6b1ee2a676ac8aeb74dd27b1b9f5
import datetime from decimal import Decimal from django.db.models import Case, Count, F, OuterRef, Q, Subquery, Sum, When from django.test import TestCase from .models import Author, Book, Publisher class FilteredAggregateTests(TestCase): @classmethod def setUpTestData(cls): cls.a1 = Author.objects.create(name='test', age=40) cls.a2 = Author.objects.create(name='test2', age=60) cls.a3 = Author.objects.create(name='test3', age=100) cls.p1 = Publisher.objects.create(name='Apress', num_awards=3, duration=datetime.timedelta(days=1)) cls.b1 = Book.objects.create( isbn='159059725', name='The Definitive Guide to Django: Web Development Done Right', pages=447, rating=4.5, price=Decimal('30.00'), contact=cls.a1, publisher=cls.p1, pubdate=datetime.date(2007, 12, 6), ) cls.b2 = Book.objects.create( isbn='067232959', name='Sams Teach Yourself Django in 24 Hours', pages=528, rating=3.0, price=Decimal('23.09'), contact=cls.a2, publisher=cls.p1, pubdate=datetime.date(2008, 3, 3), ) cls.b3 = Book.objects.create( isbn='159059996', name='Practical Django Projects', pages=600, rating=4.5, price=Decimal('29.69'), contact=cls.a3, publisher=cls.p1, pubdate=datetime.date(2008, 6, 23), ) cls.a1.friends.add(cls.a2) cls.a1.friends.add(cls.a3) cls.b1.authors.add(cls.a1) cls.b1.authors.add(cls.a3) cls.b2.authors.add(cls.a2) cls.b3.authors.add(cls.a3) def test_filtered_aggregates(self): agg = Sum('age', filter=Q(name__startswith='test')) self.assertEqual(Author.objects.aggregate(age=agg)['age'], 200) def test_double_filtered_aggregates(self): agg = Sum('age', filter=Q(Q(name='test2') & ~Q(name='test'))) self.assertEqual(Author.objects.aggregate(age=agg)['age'], 60) def test_excluded_aggregates(self): agg = Sum('age', filter=~Q(name='test2')) self.assertEqual(Author.objects.aggregate(age=agg)['age'], 140) def test_related_aggregates_m2m(self): agg = Sum('friends__age', filter=~Q(friends__name='test')) self.assertEqual(Author.objects.filter(name='test').aggregate(age=agg)['age'], 160) def test_related_aggregates_m2m_and_fk(self): q = Q(friends__book__publisher__name='Apress') & ~Q(friends__name='test3') agg = Sum('friends__book__pages', filter=q) self.assertEqual(Author.objects.filter(name='test').aggregate(pages=agg)['pages'], 528) def test_plain_annotate(self): agg = Sum('book__pages', filter=Q(book__rating__gt=3)) qs = Author.objects.annotate(pages=agg).order_by('pk') self.assertSequenceEqual([a.pages for a in qs], [447, None, 1047]) def test_filtered_aggregate_on_annotate(self): pages_annotate = Sum('book__pages', filter=Q(book__rating__gt=3)) age_agg = Sum('age', filter=Q(total_pages__gte=400)) aggregated = Author.objects.annotate(total_pages=pages_annotate).aggregate(summed_age=age_agg) self.assertEqual(aggregated, {'summed_age': 140}) def test_case_aggregate(self): agg = Sum( Case(When(friends__age=40, then=F('friends__age'))), filter=Q(friends__name__startswith='test'), ) self.assertEqual(Author.objects.aggregate(age=agg)['age'], 80) def test_sum_star_exception(self): msg = 'Star cannot be used with filter. Please specify a field.' 
with self.assertRaisesMessage(ValueError, msg): Count('*', filter=Q(age=40)) def test_filtered_reused_subquery(self): qs = Author.objects.annotate( older_friends_count=Count('friends', filter=Q(friends__age__gt=F('age'))), ).filter( older_friends_count__gte=2, ) self.assertEqual(qs.get(pk__in=qs.values('pk')), self.a1) def test_filtered_aggregate_ref_annotation(self): aggs = Author.objects.annotate( double_age=F('age') * 2, ).aggregate( cnt=Count('pk', filter=Q(double_age__gt=100)), ) self.assertEqual(aggs['cnt'], 2) def test_filtered_aggregate_ref_subquery_annotation(self): aggs = Author.objects.annotate( earliest_book_year=Subquery( Book.objects.filter( contact__pk=OuterRef('pk'), ).order_by('pubdate').values('pubdate__year')[:1] ), ).aggregate( cnt=Count('pk', filter=Q(earliest_book_year=2008)), ) self.assertEqual(aggs['cnt'], 2)
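

# Illustrative note added for exposition (not part of the original module):
# an aggregate with a ``filter`` argument compiles to an SQL filtered
# aggregate -- e.g. SUM("age") FILTER (WHERE ...) -- on backends that support
# the FILTER clause, and falls back to an equivalent CASE expression,
# SUM(CASE WHEN ... THEN "age" ELSE NULL END), elsewhere. The helper below is
# never called by the tests; with the fixture above it returns {'age': 200}.
def _example_filtered_aggregate():
    agg = Sum('age', filter=Q(name__startswith='test'))
    return Author.objects.aggregate(age=agg)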
ad193d8acf1798427bacf79b663621ff15b0618391bf57923323d18cb14033dc
from django.contrib.admin import ModelAdmin, TabularInline
from django.contrib.admin.helpers import InlineAdminForm
from django.contrib.admin.tests import AdminSeleniumTestCase
from django.contrib.auth.models import Permission, User
from django.contrib.contenttypes.models import ContentType
from django.test import RequestFactory, TestCase, override_settings
from django.urls import reverse

from .admin import InnerInline, site as admin_site
from .models import (
    Author, BinaryTree, Book, Chapter, Child, ChildModel1, ChildModel2,
    Fashionista, FootNote, Holder, Holder2, Holder3, Holder4, Inner, Inner2,
    Inner3, Inner4Stacked, Inner4Tabular, Novel, OutfitItem, Parent,
    ParentModelWithCustomPk, Person, Poll, Profile, ProfileCollection,
    Question, Sighting, SomeChildModel, SomeParentModel, Teacher,
)

INLINE_CHANGELINK_HTML = 'class="inlinechangelink">Change</a>'


class TestDataMixin:

    @classmethod
    def setUpTestData(cls):
        cls.superuser = User.objects.create_superuser(username='super', email='super@example.com', password='secret')


@override_settings(ROOT_URLCONF='admin_inlines.urls')
class TestInline(TestDataMixin, TestCase):
    factory = RequestFactory()

    @classmethod
    def setUpTestData(cls):
        super().setUpTestData()
        cls.holder = Holder.objects.create(dummy=13)
        Inner.objects.create(dummy=42, holder=cls.holder)

    def setUp(self):
        self.client.force_login(self.superuser)

    def test_can_delete(self):
        """
        can_delete should be passed to inlineformset factory.
        """
        response = self.client.get(
            reverse('admin:admin_inlines_holder_change', args=(self.holder.id,))
        )
        inner_formset = response.context['inline_admin_formsets'][0].formset
        expected = InnerInline.can_delete
        actual = inner_formset.can_delete
        self.assertEqual(expected, actual, 'can_delete must be equal')

    def test_readonly_stacked_inline_label(self):
        """Bug #13174."""
        holder = Holder.objects.create(dummy=42)
        Inner.objects.create(holder=holder, dummy=42, readonly='')
        response = self.client.get(
            reverse('admin:admin_inlines_holder_change', args=(holder.id,))
        )
        self.assertContains(response, '<label>Inner readonly label:</label>')

    def test_many_to_many_inlines(self):
        "Autogenerated many-to-many inlines are displayed correctly (#13407)"
        response = self.client.get(reverse('admin:admin_inlines_author_add'))
        # The heading for the m2m inline block uses the right text
        self.assertContains(response, '<h2>Author-book relationships</h2>')
        # The "add another" label is correct
        self.assertContains(response, 'Add another Author-book relationship')
        # The '+' is dropped from the autogenerated form prefix (Author_books+)
        self.assertContains(response, 'id="id_Author_books-TOTAL_FORMS"')

    def test_inline_primary(self):
        person = Person.objects.create(firstname='Imelda')
        item = OutfitItem.objects.create(name='Shoes')
        # Imelda likes shoes, but can't carry her own bags.
        data = {
            'shoppingweakness_set-TOTAL_FORMS': 1,
            'shoppingweakness_set-INITIAL_FORMS': 0,
            'shoppingweakness_set-MAX_NUM_FORMS': 0,
            '_save': 'Save',
            'person': person.id,
            'max_weight': 0,
            'shoppingweakness_set-0-item': item.id,
        }
        response = self.client.post(reverse('admin:admin_inlines_fashionista_add'), data)
        self.assertEqual(response.status_code, 302)
        self.assertEqual(len(Fashionista.objects.filter(person__firstname='Imelda')), 1)

    def test_tabular_inline_column_css_class(self):
        """
        Field names are included in the context to output a field-specific
        CSS class name in the column headers.
""" response = self.client.get(reverse('admin:admin_inlines_poll_add')) text_field, call_me_field = list(response.context['inline_admin_formset'].fields()) # Editable field. self.assertEqual(text_field['name'], 'text') self.assertContains(response, '<th class="column-text required">') # Read-only field. self.assertEqual(call_me_field['name'], 'call_me') self.assertContains(response, '<th class="column-call_me">') def test_custom_form_tabular_inline_label(self): """ A model form with a form field specified (TitleForm.title1) should have its label rendered in the tabular inline. """ response = self.client.get(reverse('admin:admin_inlines_titlecollection_add')) self.assertContains(response, '<th class="column-title1 required">Title1</th>', html=True) def test_custom_form_tabular_inline_overridden_label(self): """ SomeChildModelForm.__init__() overrides the label of a form field. That label is displayed in the TabularInline. """ response = self.client.get(reverse('admin:admin_inlines_someparentmodel_add')) field = list(response.context['inline_admin_formset'].fields())[0] self.assertEqual(field['label'], 'new label') self.assertContains(response, '<th class="column-name required">New label</th>', html=True) def test_tabular_non_field_errors(self): """ non_field_errors are displayed correctly, including the correct value for colspan. """ data = { 'title_set-TOTAL_FORMS': 1, 'title_set-INITIAL_FORMS': 0, 'title_set-MAX_NUM_FORMS': 0, '_save': 'Save', 'title_set-0-title1': 'a title', 'title_set-0-title2': 'a different title', } response = self.client.post(reverse('admin:admin_inlines_titlecollection_add'), data) # Here colspan is "4": two fields (title1 and title2), one hidden field and the delete checkbox. self.assertContains( response, '<tr><td colspan="4"><ul class="errorlist nonfield">' '<li>The two titles must be the same</li></ul></td></tr>' ) def test_no_parent_callable_lookup(self): """Admin inline `readonly_field` shouldn't invoke parent ModelAdmin callable""" # Identically named callable isn't present in the parent ModelAdmin, # rendering of the add view shouldn't explode response = self.client.get(reverse('admin:admin_inlines_novel_add')) self.assertEqual(response.status_code, 200) # View should have the child inlines section self.assertContains( response, '<div class="js-inline-admin-formset inline-group" id="chapter_set-group"' ) def test_callable_lookup(self): """Admin inline should invoke local callable when its name is listed in readonly_fields""" response = self.client.get(reverse('admin:admin_inlines_poll_add')) self.assertEqual(response.status_code, 200) # Add parent object view should have the child inlines section self.assertContains( response, '<div class="js-inline-admin-formset inline-group" id="question_set-group"' ) # The right callable should be used for the inline readonly_fields # column cells self.assertContains(response, '<p>Callable in QuestionInline</p>') def test_help_text(self): """ The inlines' model field help texts are displayed when using both the stacked and tabular layouts. 
""" response = self.client.get(reverse('admin:admin_inlines_holder4_add')) self.assertContains(response, '<div class="help">Awesome stacked help text is awesome.</div>', 4) self.assertContains( response, '<img src="/static/admin/img/icon-unknown.svg" ' 'class="help help-tooltip" width="10" height="10" ' 'alt="(Awesome tabular help text is awesome.)" ' 'title="Awesome tabular help text is awesome.">', 1 ) # ReadOnly fields response = self.client.get(reverse('admin:admin_inlines_capofamiglia_add')) self.assertContains( response, '<img src="/static/admin/img/icon-unknown.svg" ' 'class="help help-tooltip" width="10" height="10" ' 'alt="(Help text for ReadOnlyInline)" ' 'title="Help text for ReadOnlyInline">', 1 ) def test_tabular_model_form_meta_readonly_field(self): """ Tabular inlines use ModelForm.Meta.help_texts and labels for read-only fields. """ response = self.client.get(reverse('admin:admin_inlines_someparentmodel_add')) self.assertContains( response, '<img src="/static/admin/img/icon-unknown.svg" ' 'class="help help-tooltip" width="10" height="10" ' 'alt="(Help text from ModelForm.Meta)" ' 'title="Help text from ModelForm.Meta">' ) self.assertContains(response, 'Label from ModelForm.Meta') def test_inline_hidden_field_no_column(self): """#18263 -- Make sure hidden fields don't get a column in tabular inlines""" parent = SomeParentModel.objects.create(name='a') SomeChildModel.objects.create(name='b', position='0', parent=parent) SomeChildModel.objects.create(name='c', position='1', parent=parent) response = self.client.get(reverse('admin:admin_inlines_someparentmodel_change', args=(parent.pk,))) self.assertNotContains(response, '<td class="field-position">') self.assertInHTML( '<input id="id_somechildmodel_set-1-position" ' 'name="somechildmodel_set-1-position" type="hidden" value="1">', response.rendered_content, ) def test_non_related_name_inline(self): """ Multiple inlines with related_name='+' have correct form prefixes. """ response = self.client.get(reverse('admin:admin_inlines_capofamiglia_add')) self.assertContains(response, '<input type="hidden" name="-1-0-id" id="id_-1-0-id">', html=True) self.assertContains( response, '<input type="hidden" name="-1-0-capo_famiglia" id="id_-1-0-capo_famiglia">', html=True ) self.assertContains( response, '<input id="id_-1-0-name" type="text" class="vTextField" name="-1-0-name" maxlength="100">', html=True ) self.assertContains(response, '<input type="hidden" name="-2-0-id" id="id_-2-0-id">', html=True) self.assertContains( response, '<input type="hidden" name="-2-0-capo_famiglia" id="id_-2-0-capo_famiglia">', html=True ) self.assertContains( response, '<input id="id_-2-0-name" type="text" class="vTextField" name="-2-0-name" maxlength="100">', html=True ) @override_settings(USE_L10N=True, USE_THOUSAND_SEPARATOR=True) def test_localize_pk_shortcut(self): """ The "View on Site" link is correct for locales that use thousand separators. """ holder = Holder.objects.create(pk=123456789, dummy=42) inner = Inner.objects.create(pk=987654321, holder=holder, dummy=42, readonly='') response = self.client.get(reverse('admin:admin_inlines_holder_change', args=(holder.id,))) inner_shortcut = 'r/%s/%s/' % (ContentType.objects.get_for_model(inner).pk, inner.pk) self.assertContains(response, inner_shortcut) def test_custom_pk_shortcut(self): """ The "View on Site" link is correct for models with a custom primary key field. 
""" parent = ParentModelWithCustomPk.objects.create(my_own_pk="foo", name="Foo") child1 = ChildModel1.objects.create(my_own_pk="bar", name="Bar", parent=parent) child2 = ChildModel2.objects.create(my_own_pk="baz", name="Baz", parent=parent) response = self.client.get(reverse('admin:admin_inlines_parentmodelwithcustompk_change', args=('foo',))) child1_shortcut = 'r/%s/%s/' % (ContentType.objects.get_for_model(child1).pk, child1.pk) child2_shortcut = 'r/%s/%s/' % (ContentType.objects.get_for_model(child2).pk, child2.pk) self.assertContains(response, child1_shortcut) self.assertContains(response, child2_shortcut) def test_create_inlines_on_inherited_model(self): """ An object can be created with inlines when it inherits another class. """ data = { 'name': 'Martian', 'sighting_set-TOTAL_FORMS': 1, 'sighting_set-INITIAL_FORMS': 0, 'sighting_set-MAX_NUM_FORMS': 0, 'sighting_set-0-place': 'Zone 51', '_save': 'Save', } response = self.client.post(reverse('admin:admin_inlines_extraterrestrial_add'), data) self.assertEqual(response.status_code, 302) self.assertEqual(Sighting.objects.filter(et__name='Martian').count(), 1) def test_custom_get_extra_form(self): bt_head = BinaryTree.objects.create(name="Tree Head") BinaryTree.objects.create(name="First Child", parent=bt_head) # The maximum number of forms should respect 'get_max_num' on the # ModelAdmin max_forms_input = ( '<input id="id_binarytree_set-MAX_NUM_FORMS" ' 'name="binarytree_set-MAX_NUM_FORMS" type="hidden" value="%d">' ) # The total number of forms will remain the same in either case total_forms_hidden = ( '<input id="id_binarytree_set-TOTAL_FORMS" ' 'name="binarytree_set-TOTAL_FORMS" type="hidden" value="2">' ) response = self.client.get(reverse('admin:admin_inlines_binarytree_add')) self.assertInHTML(max_forms_input % 3, response.rendered_content) self.assertInHTML(total_forms_hidden, response.rendered_content) response = self.client.get(reverse('admin:admin_inlines_binarytree_change', args=(bt_head.id,))) self.assertInHTML(max_forms_input % 2, response.rendered_content) self.assertInHTML(total_forms_hidden, response.rendered_content) def test_min_num(self): """ min_num and extra determine number of forms. 
""" class MinNumInline(TabularInline): model = BinaryTree min_num = 2 extra = 3 modeladmin = ModelAdmin(BinaryTree, admin_site) modeladmin.inlines = [MinNumInline] min_forms = ( '<input id="id_binarytree_set-MIN_NUM_FORMS" ' 'name="binarytree_set-MIN_NUM_FORMS" type="hidden" value="2">' ) total_forms = ( '<input id="id_binarytree_set-TOTAL_FORMS" ' 'name="binarytree_set-TOTAL_FORMS" type="hidden" value="5">' ) request = self.factory.get(reverse('admin:admin_inlines_binarytree_add')) request.user = User(username='super', is_superuser=True) response = modeladmin.changeform_view(request) self.assertInHTML(min_forms, response.rendered_content) self.assertInHTML(total_forms, response.rendered_content) def test_custom_min_num(self): bt_head = BinaryTree.objects.create(name="Tree Head") BinaryTree.objects.create(name="First Child", parent=bt_head) class MinNumInline(TabularInline): model = BinaryTree extra = 3 def get_min_num(self, request, obj=None, **kwargs): if obj: return 5 return 2 modeladmin = ModelAdmin(BinaryTree, admin_site) modeladmin.inlines = [MinNumInline] min_forms = ( '<input id="id_binarytree_set-MIN_NUM_FORMS" ' 'name="binarytree_set-MIN_NUM_FORMS" type="hidden" value="%d">' ) total_forms = ( '<input id="id_binarytree_set-TOTAL_FORMS" ' 'name="binarytree_set-TOTAL_FORMS" type="hidden" value="%d">' ) request = self.factory.get(reverse('admin:admin_inlines_binarytree_add')) request.user = User(username='super', is_superuser=True) response = modeladmin.changeform_view(request) self.assertInHTML(min_forms % 2, response.rendered_content) self.assertInHTML(total_forms % 5, response.rendered_content) request = self.factory.get(reverse('admin:admin_inlines_binarytree_change', args=(bt_head.id,))) request.user = User(username='super', is_superuser=True) response = modeladmin.changeform_view(request, object_id=str(bt_head.id)) self.assertInHTML(min_forms % 5, response.rendered_content) self.assertInHTML(total_forms % 8, response.rendered_content) def test_inline_nonauto_noneditable_pk(self): response = self.client.get(reverse('admin:admin_inlines_author_add')) self.assertContains( response, '<input id="id_nonautopkbook_set-0-rand_pk" ' 'name="nonautopkbook_set-0-rand_pk" type="hidden">', html=True ) self.assertContains( response, '<input id="id_nonautopkbook_set-2-0-rand_pk" ' 'name="nonautopkbook_set-2-0-rand_pk" type="hidden">', html=True ) def test_inline_nonauto_noneditable_inherited_pk(self): response = self.client.get(reverse('admin:admin_inlines_author_add')) self.assertContains( response, '<input id="id_nonautopkbookchild_set-0-nonautopkbook_ptr" ' 'name="nonautopkbookchild_set-0-nonautopkbook_ptr" type="hidden">', html=True ) self.assertContains( response, '<input id="id_nonautopkbookchild_set-2-nonautopkbook_ptr" ' 'name="nonautopkbookchild_set-2-nonautopkbook_ptr" type="hidden">', html=True ) def test_inline_editable_pk(self): response = self.client.get(reverse('admin:admin_inlines_author_add')) self.assertContains( response, '<input class="vIntegerField" id="id_editablepkbook_set-0-manual_pk" ' 'name="editablepkbook_set-0-manual_pk" type="number">', html=True, count=1 ) self.assertContains( response, '<input class="vIntegerField" id="id_editablepkbook_set-2-0-manual_pk" ' 'name="editablepkbook_set-2-0-manual_pk" type="number">', html=True, count=1 ) def test_stacked_inline_edit_form_contains_has_original_class(self): holder = Holder.objects.create(dummy=1) holder.inner_set.create(dummy=1) response = self.client.get(reverse('admin:admin_inlines_holder_change', 
args=(holder.pk,))) self.assertContains( response, '<div class="inline-related has_original" id="inner_set-0">', count=1 ) self.assertContains( response, '<div class="inline-related" id="inner_set-1">', count=1 ) def test_inlines_show_change_link_registered(self): "Inlines `show_change_link` for registered models when enabled." holder = Holder4.objects.create(dummy=1) item1 = Inner4Stacked.objects.create(dummy=1, holder=holder) item2 = Inner4Tabular.objects.create(dummy=1, holder=holder) items = ( ('inner4stacked', item1.pk), ('inner4tabular', item2.pk), ) response = self.client.get(reverse('admin:admin_inlines_holder4_change', args=(holder.pk,))) self.assertTrue(response.context['inline_admin_formset'].opts.has_registered_model) for model, pk in items: url = reverse('admin:admin_inlines_%s_change' % model, args=(pk,)) self.assertContains(response, '<a href="%s" %s' % (url, INLINE_CHANGELINK_HTML)) def test_inlines_show_change_link_unregistered(self): "Inlines `show_change_link` disabled for unregistered models." parent = ParentModelWithCustomPk.objects.create(my_own_pk="foo", name="Foo") ChildModel1.objects.create(my_own_pk="bar", name="Bar", parent=parent) ChildModel2.objects.create(my_own_pk="baz", name="Baz", parent=parent) response = self.client.get(reverse('admin:admin_inlines_parentmodelwithcustompk_change', args=('foo',))) self.assertFalse(response.context['inline_admin_formset'].opts.has_registered_model) self.assertNotContains(response, INLINE_CHANGELINK_HTML) def test_tabular_inline_show_change_link_false_registered(self): "Inlines `show_change_link` disabled by default." poll = Poll.objects.create(name="New poll") Question.objects.create(poll=poll) response = self.client.get(reverse('admin:admin_inlines_poll_change', args=(poll.pk,))) self.assertTrue(response.context['inline_admin_formset'].opts.has_registered_model) self.assertNotContains(response, INLINE_CHANGELINK_HTML) def test_noneditable_inline_has_field_inputs(self): """Inlines without change permission shows field inputs on add form.""" response = self.client.get(reverse('admin:admin_inlines_novelreadonlychapter_add')) self.assertContains( response, '<input type="text" name="chapter_set-0-name" ' 'class="vTextField" maxlength="40" id="id_chapter_set-0-name">', html=True ) @override_settings(ROOT_URLCONF='admin_inlines.urls') class TestInlineMedia(TestDataMixin, TestCase): def setUp(self): self.client.force_login(self.superuser) def test_inline_media_only_base(self): holder = Holder(dummy=13) holder.save() Inner(dummy=42, holder=holder).save() change_url = reverse('admin:admin_inlines_holder_change', args=(holder.id,)) response = self.client.get(change_url) self.assertContains(response, 'my_awesome_admin_scripts.js') def test_inline_media_only_inline(self): holder = Holder3(dummy=13) holder.save() Inner3(dummy=42, holder=holder).save() change_url = reverse('admin:admin_inlines_holder3_change', args=(holder.id,)) response = self.client.get(change_url) self.assertEqual( response.context['inline_admin_formsets'][0].media._js, [ 'admin/js/vendor/jquery/jquery.min.js', 'my_awesome_inline_scripts.js', 'custom_number.js', 'admin/js/jquery.init.js', 'admin/js/inlines.min.js', ] ) self.assertContains(response, 'my_awesome_inline_scripts.js') def test_all_inline_media(self): holder = Holder2(dummy=13) holder.save() Inner2(dummy=42, holder=holder).save() change_url = reverse('admin:admin_inlines_holder2_change', args=(holder.id,)) response = self.client.get(change_url) self.assertContains(response, 'my_awesome_admin_scripts.js') 
self.assertContains(response, 'my_awesome_inline_scripts.js') @override_settings(ROOT_URLCONF='admin_inlines.urls') class TestInlineAdminForm(TestCase): def test_immutable_content_type(self): """Regression for #9362 The problem depends only on InlineAdminForm and its "original" argument, so we can safely set the other arguments to None/{}. We just need to check that the content_type argument of Child isn't altered by the internals of the inline form.""" sally = Teacher.objects.create(name='Sally') john = Parent.objects.create(name='John') joe = Child.objects.create(name='Joe', teacher=sally, parent=john) iaf = InlineAdminForm(None, None, {}, {}, joe) parent_ct = ContentType.objects.get_for_model(Parent) self.assertEqual(iaf.original.content_type, parent_ct) @override_settings(ROOT_URLCONF='admin_inlines.urls') class TestInlineProtectedOnDelete(TestDataMixin, TestCase): def setUp(self): self.client.force_login(self.superuser) def test_deleting_inline_with_protected_delete_does_not_validate(self): lotr = Novel.objects.create(name='Lord of the rings') chapter = Chapter.objects.create(novel=lotr, name='Many Meetings') foot_note = FootNote.objects.create(chapter=chapter, note='yadda yadda') change_url = reverse('admin:admin_inlines_novel_change', args=(lotr.id,)) response = self.client.get(change_url) data = { 'name': lotr.name, 'chapter_set-TOTAL_FORMS': 1, 'chapter_set-INITIAL_FORMS': 1, 'chapter_set-MAX_NUM_FORMS': 1000, '_save': 'Save', 'chapter_set-0-id': chapter.id, 'chapter_set-0-name': chapter.name, 'chapter_set-0-novel': lotr.id, 'chapter_set-0-DELETE': 'on' } response = self.client.post(change_url, data) self.assertContains(response, "Deleting chapter %s would require deleting " "the following protected related objects: foot note %s" % (chapter, foot_note)) @override_settings(ROOT_URLCONF='admin_inlines.urls') class TestInlinePermissions(TestCase): """ Make sure the admin respects permissions for objects that are edited inline. Refs #8060. """ @classmethod def setUpTestData(cls): cls.user = User(username='admin', is_staff=True, is_active=True) cls.user.set_password('secret') cls.user.save() cls.author_ct = ContentType.objects.get_for_model(Author) cls.holder_ct = ContentType.objects.get_for_model(Holder2) cls.book_ct = ContentType.objects.get_for_model(Book) cls.inner_ct = ContentType.objects.get_for_model(Inner2) # User always has permissions to add and change Authors, and Holders, # the main (parent) models of the inlines. Permissions on the inlines # vary per test. 
permission = Permission.objects.get(codename='add_author', content_type=cls.author_ct) cls.user.user_permissions.add(permission) permission = Permission.objects.get(codename='change_author', content_type=cls.author_ct) cls.user.user_permissions.add(permission) permission = Permission.objects.get(codename='add_holder2', content_type=cls.holder_ct) cls.user.user_permissions.add(permission) permission = Permission.objects.get(codename='change_holder2', content_type=cls.holder_ct) cls.user.user_permissions.add(permission) author = Author.objects.create(pk=1, name='The Author') cls.book = author.books.create(name='The inline Book') cls.author_change_url = reverse('admin:admin_inlines_author_change', args=(author.id,)) # Get the ID of the automatically created intermediate model for the Author-Book m2m author_book_auto_m2m_intermediate = Author.books.through.objects.get(author=author, book=cls.book) cls.author_book_auto_m2m_intermediate_id = author_book_auto_m2m_intermediate.pk cls.holder = Holder2.objects.create(dummy=13) cls.inner2 = Inner2.objects.create(dummy=42, holder=cls.holder) def setUp(self): self.holder_change_url = reverse('admin:admin_inlines_holder2_change', args=(self.holder.id,)) self.client.force_login(self.user) def test_inline_add_m2m_noperm(self): response = self.client.get(reverse('admin:admin_inlines_author_add')) # No change permission on books, so no inline self.assertNotContains(response, '<h2>Author-book relationships</h2>') self.assertNotContains(response, 'Add another Author-Book Relationship') self.assertNotContains(response, 'id="id_Author_books-TOTAL_FORMS"') def test_inline_add_fk_noperm(self): response = self.client.get(reverse('admin:admin_inlines_holder2_add')) # No permissions on Inner2s, so no inline self.assertNotContains(response, '<h2>Inner2s</h2>') self.assertNotContains(response, 'Add another Inner2') self.assertNotContains(response, 'id="id_inner2_set-TOTAL_FORMS"') def test_inline_change_m2m_noperm(self): response = self.client.get(self.author_change_url) # No change permission on books, so no inline self.assertNotContains(response, '<h2>Author-book relationships</h2>') self.assertNotContains(response, 'Add another Author-Book Relationship') self.assertNotContains(response, 'id="id_Author_books-TOTAL_FORMS"') def test_inline_change_fk_noperm(self): response = self.client.get(self.holder_change_url) # No permissions on Inner2s, so no inline self.assertNotContains(response, '<h2>Inner2s</h2>') self.assertNotContains(response, 'Add another Inner2') self.assertNotContains(response, 'id="id_inner2_set-TOTAL_FORMS"') def test_inline_add_m2m_view_only_perm(self): permission = Permission.objects.get(codename='view_book', content_type=self.book_ct) self.user.user_permissions.add(permission) response = self.client.get(reverse('admin:admin_inlines_author_add')) # View-only inlines. (It could be nicer to hide the empty, non-editable # inlines on the add page.) 
self.assertIs(response.context['inline_admin_formset'].has_view_permission, True) self.assertIs(response.context['inline_admin_formset'].has_add_permission, False) self.assertIs(response.context['inline_admin_formset'].has_change_permission, False) self.assertIs(response.context['inline_admin_formset'].has_delete_permission, False) self.assertContains(response, '<h2>Author-book relationships</h2>') self.assertContains( response, '<input type="hidden" name="Author_books-TOTAL_FORMS" value="0" ' 'id="id_Author_books-TOTAL_FORMS">', html=True, ) self.assertNotContains(response, 'Add another Author-Book Relationship') def test_inline_add_m2m_add_perm(self): permission = Permission.objects.get(codename='add_book', content_type=self.book_ct) self.user.user_permissions.add(permission) response = self.client.get(reverse('admin:admin_inlines_author_add')) # No change permission on Books, so no inline self.assertNotContains(response, '<h2>Author-book relationships</h2>') self.assertNotContains(response, 'Add another Author-Book Relationship') self.assertNotContains(response, 'id="id_Author_books-TOTAL_FORMS"') def test_inline_add_fk_add_perm(self): permission = Permission.objects.get(codename='add_inner2', content_type=self.inner_ct) self.user.user_permissions.add(permission) response = self.client.get(reverse('admin:admin_inlines_holder2_add')) # Add permission on inner2s, so we get the inline self.assertContains(response, '<h2>Inner2s</h2>') self.assertContains(response, 'Add another Inner2') self.assertContains(response, '<input type="hidden" id="id_inner2_set-TOTAL_FORMS" ' 'value="3" name="inner2_set-TOTAL_FORMS">', html=True) def test_inline_change_m2m_add_perm(self): permission = Permission.objects.get(codename='add_book', content_type=self.book_ct) self.user.user_permissions.add(permission) response = self.client.get(self.author_change_url) # No change permission on books, so no inline self.assertNotContains(response, '<h2>Author-book relationships</h2>') self.assertNotContains(response, 'Add another Author-Book Relationship') self.assertNotContains(response, 'id="id_Author_books-TOTAL_FORMS"') self.assertNotContains(response, 'id="id_Author_books-0-DELETE"') def test_inline_change_m2m_view_only_perm(self): permission = Permission.objects.get(codename='view_book', content_type=self.book_ct) self.user.user_permissions.add(permission) response = self.client.get(self.author_change_url) # View-only inlines. self.assertIs(response.context['inline_admin_formset'].has_view_permission, True) self.assertIs(response.context['inline_admin_formset'].has_add_permission, False) self.assertIs(response.context['inline_admin_formset'].has_change_permission, False) self.assertIs(response.context['inline_admin_formset'].has_delete_permission, False) self.assertContains(response, '<h2>Author-book relationships</h2>') self.assertContains( response, '<input type="hidden" name="Author_books-TOTAL_FORMS" value="1" ' 'id="id_Author_books-TOTAL_FORMS">', html=True, ) # The field in the inline is read-only. 
self.assertContains(response, '<p>%s</p>' % self.book) self.assertNotContains( response, '<input type="checkbox" name="Author_books-0-DELETE" id="id_Author_books-0-DELETE">', html=True, ) def test_inline_change_m2m_change_perm(self): permission = Permission.objects.get(codename='change_book', content_type=self.book_ct) self.user.user_permissions.add(permission) response = self.client.get(self.author_change_url) # We have change perm on books, so we can add/change/delete inlines self.assertIs(response.context['inline_admin_formset'].has_view_permission, True) self.assertIs(response.context['inline_admin_formset'].has_add_permission, True) self.assertIs(response.context['inline_admin_formset'].has_change_permission, True) self.assertIs(response.context['inline_admin_formset'].has_delete_permission, True) self.assertContains(response, '<h2>Author-book relationships</h2>') self.assertContains(response, 'Add another Author-book relationship') self.assertContains(response, '<input type="hidden" id="id_Author_books-TOTAL_FORMS" ' 'value="4" name="Author_books-TOTAL_FORMS">', html=True) self.assertContains( response, '<input type="hidden" id="id_Author_books-0-id" value="%i" ' 'name="Author_books-0-id">' % self.author_book_auto_m2m_intermediate_id, html=True ) self.assertContains(response, 'id="id_Author_books-0-DELETE"') def test_inline_change_fk_add_perm(self): permission = Permission.objects.get(codename='add_inner2', content_type=self.inner_ct) self.user.user_permissions.add(permission) response = self.client.get(self.holder_change_url) # Add permission on inner2s, so we can add but not modify existing self.assertContains(response, '<h2>Inner2s</h2>') self.assertContains(response, 'Add another Inner2') # 3 extra forms only, not the existing instance form self.assertContains( response, '<input type="hidden" id="id_inner2_set-TOTAL_FORMS" value="3" ' 'name="inner2_set-TOTAL_FORMS">', html=True ) self.assertNotContains( response, '<input type="hidden" id="id_inner2_set-0-id" value="%i" name="inner2_set-0-id">' % self.inner2.id, html=True ) def test_inline_change_fk_change_perm(self): permission = Permission.objects.get(codename='change_inner2', content_type=self.inner_ct) self.user.user_permissions.add(permission) response = self.client.get(self.holder_change_url) # Change permission on inner2s, so we can change existing but not add new self.assertContains(response, '<h2>Inner2s</h2>', count=2) # Just the one form for existing instances self.assertContains( response, '<input type="hidden" id="id_inner2_set-TOTAL_FORMS" value="1" name="inner2_set-TOTAL_FORMS">', html=True ) self.assertContains( response, '<input type="hidden" id="id_inner2_set-0-id" value="%i" name="inner2_set-0-id">' % self.inner2.id, html=True ) # max-num 0 means we can't add new ones self.assertContains( response, '<input type="hidden" id="id_inner2_set-MAX_NUM_FORMS" value="0" name="inner2_set-MAX_NUM_FORMS">', html=True ) # TabularInline self.assertContains(response, '<th class="column-dummy required">Dummy</th>', html=True) self.assertContains( response, '<input type="number" name="inner2_set-2-0-dummy" value="%s" ' 'class="vIntegerField" id="id_inner2_set-2-0-dummy">' % self.inner2.dummy, html=True, ) def test_inline_change_fk_add_change_perm(self): permission = Permission.objects.get(codename='add_inner2', content_type=self.inner_ct) self.user.user_permissions.add(permission) permission = Permission.objects.get(codename='change_inner2', content_type=self.inner_ct) self.user.user_permissions.add(permission) response = 
        self.client.get(self.holder_change_url)
        # Add/change perm, so we can add new and change existing
        self.assertContains(response, '<h2>Inner2s</h2>')
        # One form for existing instance and three extra for new
        self.assertContains(
            response,
            '<input type="hidden" id="id_inner2_set-TOTAL_FORMS" value="4" name="inner2_set-TOTAL_FORMS">',
            html=True
        )
        self.assertContains(
            response,
            '<input type="hidden" id="id_inner2_set-0-id" value="%i" name="inner2_set-0-id">' % self.inner2.id,
            html=True
        )

    def test_inline_change_fk_change_del_perm(self):
        permission = Permission.objects.get(codename='change_inner2', content_type=self.inner_ct)
        self.user.user_permissions.add(permission)
        permission = Permission.objects.get(codename='delete_inner2', content_type=self.inner_ct)
        self.user.user_permissions.add(permission)
        response = self.client.get(self.holder_change_url)
        # Change/delete perm on inner2s, so we can change/delete existing
        self.assertContains(response, '<h2>Inner2s</h2>')
        # One form for existing instance only, no new
        self.assertContains(
            response,
            '<input type="hidden" id="id_inner2_set-TOTAL_FORMS" value="1" name="inner2_set-TOTAL_FORMS">',
            html=True
        )
        self.assertContains(
            response,
            '<input type="hidden" id="id_inner2_set-0-id" value="%i" name="inner2_set-0-id">' % self.inner2.id,
            html=True
        )
        self.assertContains(response, 'id="id_inner2_set-0-DELETE"')

    def test_inline_change_fk_all_perms(self):
        permission = Permission.objects.get(codename='add_inner2', content_type=self.inner_ct)
        self.user.user_permissions.add(permission)
        permission = Permission.objects.get(codename='change_inner2', content_type=self.inner_ct)
        self.user.user_permissions.add(permission)
        permission = Permission.objects.get(codename='delete_inner2', content_type=self.inner_ct)
        self.user.user_permissions.add(permission)
        response = self.client.get(self.holder_change_url)
        # All perms on inner2s, so we can add/change/delete
        self.assertContains(response, '<h2>Inner2s</h2>', count=2)
        # One form for existing instance only, three for new
        self.assertContains(
            response,
            '<input type="hidden" id="id_inner2_set-TOTAL_FORMS" value="4" name="inner2_set-TOTAL_FORMS">',
            html=True
        )
        self.assertContains(
            response,
            '<input type="hidden" id="id_inner2_set-0-id" value="%i" name="inner2_set-0-id">' % self.inner2.id,
            html=True
        )
        self.assertContains(response, 'id="id_inner2_set-0-DELETE"')
        # TabularInline
        self.assertContains(response, '<th class="column-dummy required">Dummy</th>', html=True)
        self.assertContains(
            response,
            '<input type="number" name="inner2_set-2-0-dummy" value="%s" '
            'class="vIntegerField" id="id_inner2_set-2-0-dummy">' % self.inner2.dummy,
            html=True,
        )


@override_settings(ROOT_URLCONF='admin_inlines.urls')
class SeleniumTests(AdminSeleniumTestCase):
    available_apps = ['admin_inlines'] + AdminSeleniumTestCase.available_apps

    def setUp(self):
        User.objects.create_superuser(username='super', password='secret', email='super@example.com')

    def test_add_stackeds(self):
        """
        The "Add another XXX" link correctly adds items to the stacked formset.
""" self.admin_login(username='super', password='secret') self.selenium.get(self.live_server_url + reverse('admin:admin_inlines_holder4_add')) inline_id = '#inner4stacked_set-group' def rows_length(): return len(self.selenium.find_elements_by_css_selector('%s .dynamic-inner4stacked_set' % inline_id)) self.assertEqual(rows_length(), 3) add_button = self.selenium.find_element_by_link_text( 'Add another Inner4 stacked') add_button.click() self.assertEqual(rows_length(), 4) def test_delete_stackeds(self): self.admin_login(username='super', password='secret') self.selenium.get(self.live_server_url + reverse('admin:admin_inlines_holder4_add')) inline_id = '#inner4stacked_set-group' def rows_length(): return len(self.selenium.find_elements_by_css_selector('%s .dynamic-inner4stacked_set' % inline_id)) self.assertEqual(rows_length(), 3) add_button = self.selenium.find_element_by_link_text( 'Add another Inner4 stacked') add_button.click() add_button.click() self.assertEqual(rows_length(), 5, msg="sanity check") for delete_link in self.selenium.find_elements_by_css_selector('%s .inline-deletelink' % inline_id): delete_link.click() self.assertEqual(rows_length(), 3) def test_add_inlines(self): """ The "Add another XXX" link correctly adds items to the inline form. """ self.admin_login(username='super', password='secret') self.selenium.get(self.live_server_url + reverse('admin:admin_inlines_profilecollection_add')) # There's only one inline to start with and it has the correct ID. self.assertEqual(len(self.selenium.find_elements_by_css_selector( '.dynamic-profile_set')), 1) self.assertEqual(self.selenium.find_elements_by_css_selector( '.dynamic-profile_set')[0].get_attribute('id'), 'profile_set-0') self.assertEqual(len(self.selenium.find_elements_by_css_selector( '.dynamic-profile_set#profile_set-0 input[name=profile_set-0-first_name]')), 1) self.assertEqual(len(self.selenium.find_elements_by_css_selector( '.dynamic-profile_set#profile_set-0 input[name=profile_set-0-last_name]')), 1) # Add an inline self.selenium.find_element_by_link_text('Add another Profile').click() # The inline has been added, it has the right id, and it contains the # correct fields. 
self.assertEqual(len(self.selenium.find_elements_by_css_selector('.dynamic-profile_set')), 2) self.assertEqual(self.selenium.find_elements_by_css_selector( '.dynamic-profile_set')[1].get_attribute('id'), 'profile_set-1') self.assertEqual(len(self.selenium.find_elements_by_css_selector( '.dynamic-profile_set#profile_set-1 input[name=profile_set-1-first_name]')), 1) self.assertEqual(len(self.selenium.find_elements_by_css_selector( '.dynamic-profile_set#profile_set-1 input[name=profile_set-1-last_name]')), 1) # Let's add another one to be sure self.selenium.find_element_by_link_text('Add another Profile').click() self.assertEqual(len(self.selenium.find_elements_by_css_selector('.dynamic-profile_set')), 3) self.assertEqual(self.selenium.find_elements_by_css_selector( '.dynamic-profile_set')[2].get_attribute('id'), 'profile_set-2') self.assertEqual(len(self.selenium.find_elements_by_css_selector( '.dynamic-profile_set#profile_set-2 input[name=profile_set-2-first_name]')), 1) self.assertEqual(len(self.selenium.find_elements_by_css_selector( '.dynamic-profile_set#profile_set-2 input[name=profile_set-2-last_name]')), 1) # Enter some data and click 'Save' self.selenium.find_element_by_name('profile_set-0-first_name').send_keys('0 first name 1') self.selenium.find_element_by_name('profile_set-0-last_name').send_keys('0 last name 2') self.selenium.find_element_by_name('profile_set-1-first_name').send_keys('1 first name 1') self.selenium.find_element_by_name('profile_set-1-last_name').send_keys('1 last name 2') self.selenium.find_element_by_name('profile_set-2-first_name').send_keys('2 first name 1') self.selenium.find_element_by_name('profile_set-2-last_name').send_keys('2 last name 2') self.selenium.find_element_by_xpath('//input[@value="Save"]').click() self.wait_page_loaded() # The objects have been created in the database self.assertEqual(ProfileCollection.objects.all().count(), 1) self.assertEqual(Profile.objects.all().count(), 3) def test_delete_inlines(self): self.admin_login(username='super', password='secret') self.selenium.get(self.live_server_url + reverse('admin:admin_inlines_profilecollection_add')) # Add a few inlines self.selenium.find_element_by_link_text('Add another Profile').click() self.selenium.find_element_by_link_text('Add another Profile').click() self.selenium.find_element_by_link_text('Add another Profile').click() self.selenium.find_element_by_link_text('Add another Profile').click() self.assertEqual(len(self.selenium.find_elements_by_css_selector( '#profile_set-group table tr.dynamic-profile_set')), 5) self.assertEqual(len(self.selenium.find_elements_by_css_selector( 'form#profilecollection_form tr.dynamic-profile_set#profile_set-0')), 1) self.assertEqual(len(self.selenium.find_elements_by_css_selector( 'form#profilecollection_form tr.dynamic-profile_set#profile_set-1')), 1) self.assertEqual(len(self.selenium.find_elements_by_css_selector( 'form#profilecollection_form tr.dynamic-profile_set#profile_set-2')), 1) self.assertEqual(len(self.selenium.find_elements_by_css_selector( 'form#profilecollection_form tr.dynamic-profile_set#profile_set-3')), 1) self.assertEqual(len(self.selenium.find_elements_by_css_selector( 'form#profilecollection_form tr.dynamic-profile_set#profile_set-4')), 1) # Click on a few delete buttons self.selenium.find_element_by_css_selector( 'form#profilecollection_form tr.dynamic-profile_set#profile_set-1 td.delete a').click() self.selenium.find_element_by_css_selector( 'form#profilecollection_form tr.dynamic-profile_set#profile_set-2 td.delete 
a').click() # The rows are gone and the IDs have been re-sequenced self.assertEqual(len(self.selenium.find_elements_by_css_selector( '#profile_set-group table tr.dynamic-profile_set')), 3) self.assertEqual(len(self.selenium.find_elements_by_css_selector( 'form#profilecollection_form tr.dynamic-profile_set#profile_set-0')), 1) self.assertEqual(len(self.selenium.find_elements_by_css_selector( 'form#profilecollection_form tr.dynamic-profile_set#profile_set-1')), 1) self.assertEqual(len(self.selenium.find_elements_by_css_selector( 'form#profilecollection_form tr.dynamic-profile_set#profile_set-2')), 1) def test_alternating_rows(self): self.admin_login(username='super', password='secret') self.selenium.get(self.live_server_url + reverse('admin:admin_inlines_profilecollection_add')) # Add a few inlines self.selenium.find_element_by_link_text('Add another Profile').click() self.selenium.find_element_by_link_text('Add another Profile').click() row_selector = 'form#profilecollection_form tr.dynamic-profile_set' self.assertEqual(len(self.selenium.find_elements_by_css_selector( "%s.row1" % row_selector)), 2, msg="Expect two row1 styled rows") self.assertEqual(len(self.selenium.find_elements_by_css_selector( "%s.row2" % row_selector)), 1, msg="Expect one row2 styled row") def test_collapsed_inlines(self): # Collapsed inlines have SHOW/HIDE links. self.admin_login(username='super', password='secret') self.selenium.get(self.live_server_url + reverse('admin:admin_inlines_author_add')) # One field is in a stacked inline, other in a tabular one. test_fields = ['#id_nonautopkbook_set-0-title', '#id_nonautopkbook_set-2-0-title'] show_links = self.selenium.find_elements_by_link_text('SHOW') self.assertEqual(len(show_links), 3) for show_index, field_name in enumerate(test_fields, 0): self.wait_until_invisible(field_name) show_links[show_index].click() self.wait_until_visible(field_name) hide_links = self.selenium.find_elements_by_link_text('HIDE') self.assertEqual(len(hide_links), 2) for hide_index, field_name in enumerate(test_fields, 0): self.wait_until_visible(field_name) hide_links[hide_index].click() self.wait_until_invisible(field_name) def test_added_stacked_inline_with_collapsed_fields(self): self.admin_login(username='super', password='secret') self.selenium.get(self.live_server_url + reverse('admin:admin_inlines_teacher_add')) self.selenium.find_element_by_link_text('Add another Child').click() test_fields = ['#id_child_set-0-name', '#id_child_set-1-name'] show_links = self.selenium.find_elements_by_link_text('SHOW') self.assertEqual(len(show_links), 2) for show_index, field_name in enumerate(test_fields, 0): self.wait_until_invisible(field_name) show_links[show_index].click() self.wait_until_visible(field_name) hide_links = self.selenium.find_elements_by_link_text('HIDE') self.assertEqual(len(hide_links), 2) for hide_index, field_name in enumerate(test_fields, 0): self.wait_until_visible(field_name) hide_links[hide_index].click() self.wait_until_invisible(field_name)
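

# Illustrative helper added for exposition (hypothetical; not used by the
# tests). The TOTAL_FORMS counts asserted in test_min_num and
# test_custom_min_num above follow the formset sizing rule, roughly: pad the
# existing forms up to min_num, add the extras, and cap at max_num, except
# that existing objects are always displayed.
def _expected_total_forms(initial, extra, min_num=0, max_num=1000):
    # Pad existing forms up to min_num, then add the extra blank forms...
    total = max(initial, min_num) + extra
    # ...but never exceed max_num, except that existing objects always render.
    return initial if initial > max_num else min(total, max_num)

# e.g. _expected_total_forms(0, 3, min_num=2) == 5 (test_min_num, add view) and
# _expected_total_forms(1, 3, min_num=5) == 8 (test_custom_min_num, change view).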
61f66078af10169c6dd68f2b63864cc58c4e73205f950f716429d07d8c725d05
from django import forms from django.contrib import admin from django.db import models from .models import ( Author, BinaryTree, CapoFamiglia, Chapter, Child, ChildModel1, ChildModel2, Consigliere, EditablePKBook, ExtraTerrestrial, Fashionista, Holder, Holder2, Holder3, Holder4, Inner, Inner2, Inner3, Inner4Stacked, Inner4Tabular, NonAutoPKBook, NonAutoPKBookChild, Novel, NovelReadonlyChapter, ParentModelWithCustomPk, Poll, Profile, ProfileCollection, Question, ReadOnlyInline, ShoppingWeakness, Sighting, SomeChildModel, SomeParentModel, SottoCapo, Teacher, Title, TitleCollection, ) site = admin.AdminSite(name="admin") class BookInline(admin.TabularInline): model = Author.books.through class NonAutoPKBookTabularInline(admin.TabularInline): model = NonAutoPKBook classes = ('collapse',) class NonAutoPKBookChildTabularInline(admin.TabularInline): model = NonAutoPKBookChild classes = ('collapse',) class NonAutoPKBookStackedInline(admin.StackedInline): model = NonAutoPKBook classes = ('collapse',) class EditablePKBookTabularInline(admin.TabularInline): model = EditablePKBook class EditablePKBookStackedInline(admin.StackedInline): model = EditablePKBook class AuthorAdmin(admin.ModelAdmin): inlines = [ BookInline, NonAutoPKBookTabularInline, NonAutoPKBookStackedInline, EditablePKBookTabularInline, EditablePKBookStackedInline, NonAutoPKBookChildTabularInline, ] class InnerInline(admin.StackedInline): model = Inner can_delete = False readonly_fields = ('readonly',) # For bug #13174 tests. class HolderAdmin(admin.ModelAdmin): class Media: js = ('my_awesome_admin_scripts.js',) class ReadOnlyInlineInline(admin.TabularInline): model = ReadOnlyInline readonly_fields = ['name'] class InnerInline2(admin.StackedInline): model = Inner2 class Media: js = ('my_awesome_inline_scripts.js',) class InnerInline2Tabular(admin.TabularInline): model = Inner2 class CustomNumberWidget(forms.NumberInput): class Media: js = ('custom_number.js',) class InnerInline3(admin.StackedInline): model = Inner3 formfield_overrides = { models.IntegerField: {'widget': CustomNumberWidget}, } class Media: js = ('my_awesome_inline_scripts.js',) class TitleForm(forms.ModelForm): title1 = forms.CharField(max_length=100) def clean(self): cleaned_data = self.cleaned_data title1 = cleaned_data.get("title1") title2 = cleaned_data.get("title2") if title1 != title2: raise forms.ValidationError("The two titles must be the same") return cleaned_data class TitleInline(admin.TabularInline): model = Title form = TitleForm extra = 1 class Inner4StackedInline(admin.StackedInline): model = Inner4Stacked show_change_link = True class Inner4TabularInline(admin.TabularInline): model = Inner4Tabular show_change_link = True class Holder4Admin(admin.ModelAdmin): inlines = [Inner4StackedInline, Inner4TabularInline] class InlineWeakness(admin.TabularInline): model = ShoppingWeakness extra = 1 class QuestionInline(admin.TabularInline): model = Question readonly_fields = ['call_me'] def call_me(self, obj): return 'Callable in QuestionInline' class PollAdmin(admin.ModelAdmin): inlines = [QuestionInline] def call_me(self, obj): return 'Callable in PollAdmin' class ChapterInline(admin.TabularInline): model = Chapter readonly_fields = ['call_me'] def call_me(self, obj): return 'Callable in ChapterInline' class NovelAdmin(admin.ModelAdmin): inlines = [ChapterInline] class ReadOnlyChapterInline(admin.TabularInline): model = Chapter def has_change_permission(self, request, obj=None): return False class NovelReadonlyChapterAdmin(admin.ModelAdmin): inlines = 
[ReadOnlyChapterInline] class ConsigliereInline(admin.TabularInline): model = Consigliere class SottoCapoInline(admin.TabularInline): model = SottoCapo class ProfileInline(admin.TabularInline): model = Profile extra = 1 # admin for #18433 class ChildModel1Inline(admin.TabularInline): model = ChildModel1 class ChildModel2Inline(admin.StackedInline): model = ChildModel2 # admin for #19425 and #18388 class BinaryTreeAdmin(admin.TabularInline): model = BinaryTree def get_extra(self, request, obj=None, **kwargs): extra = 2 if obj: return extra - obj.binarytree_set.count() return extra def get_max_num(self, request, obj=None, **kwargs): max_num = 3 if obj: return max_num - obj.binarytree_set.count() return max_num # admin for #19524 class SightingInline(admin.TabularInline): model = Sighting # admin and form for #18263 class SomeChildModelForm(forms.ModelForm): class Meta: fields = '__all__' model = SomeChildModel widgets = { 'position': forms.HiddenInput, } labels = {'readonly_field': 'Label from ModelForm.Meta'} help_texts = {'readonly_field': 'Help text from ModelForm.Meta'} def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.fields['name'].label = 'new label' class SomeChildModelInline(admin.TabularInline): model = SomeChildModel form = SomeChildModelForm readonly_fields = ('readonly_field',) class StudentInline(admin.StackedInline): model = Child extra = 1 fieldsets = [ ('Name', {'fields': ('name',), 'classes': ('collapse',)}), ] class TeacherAdmin(admin.ModelAdmin): inlines = [StudentInline] site.register(TitleCollection, inlines=[TitleInline]) # Test bug #12561 and #12778 # only ModelAdmin media site.register(Holder, HolderAdmin, inlines=[InnerInline]) # ModelAdmin and Inline media site.register(Holder2, HolderAdmin, inlines=[InnerInline2, InnerInline2Tabular]) # only Inline media site.register(Holder3, inlines=[InnerInline3]) site.register(Poll, PollAdmin) site.register(Novel, NovelAdmin) site.register(NovelReadonlyChapter, NovelReadonlyChapterAdmin) site.register(Fashionista, inlines=[InlineWeakness]) site.register(Holder4, Holder4Admin) site.register(Author, AuthorAdmin) site.register(CapoFamiglia, inlines=[ConsigliereInline, SottoCapoInline, ReadOnlyInlineInline]) site.register(ProfileCollection, inlines=[ProfileInline]) site.register(ParentModelWithCustomPk, inlines=[ChildModel1Inline, ChildModel2Inline]) site.register(BinaryTree, inlines=[BinaryTreeAdmin]) site.register(ExtraTerrestrial, inlines=[SightingInline]) site.register(SomeParentModel, inlines=[SomeChildModelInline]) site.register([Question, Inner4Stacked, Inner4Tabular]) site.register(Teacher, TeacherAdmin)
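

# Illustrative sketch added for exposition: the test suites above set
# ROOT_URLCONF='admin_inlines.urls', a module that is not shown here. It
# presumably mounts this custom site along the standard lines of:
#
#     from django.urls import path
#     from .admin import site
#
#     urlpatterns = [path('admin/', site.urls)]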
7ddff46c7e3cec88fd5114bafbae42f85e3cb3b8dfdbd3086683ab6196668b09
import json import mimetypes import os import re import sys from copy import copy from functools import partial from http import HTTPStatus from importlib import import_module from io import BytesIO from urllib.parse import unquote_to_bytes, urljoin, urlparse, urlsplit from django.conf import settings from django.core.handlers.base import BaseHandler from django.core.handlers.wsgi import WSGIRequest from django.core.serializers.json import DjangoJSONEncoder from django.core.signals import ( got_request_exception, request_finished, request_started, ) from django.db import close_old_connections from django.http import HttpRequest, QueryDict, SimpleCookie from django.test import signals from django.test.utils import ContextList from django.urls import resolve from django.utils.encoding import force_bytes from django.utils.functional import SimpleLazyObject from django.utils.http import urlencode from django.utils.itercompat import is_iterable __all__ = ('Client', 'RedirectCycleError', 'RequestFactory', 'encode_file', 'encode_multipart') BOUNDARY = 'BoUnDaRyStRiNg' MULTIPART_CONTENT = 'multipart/form-data; boundary=%s' % BOUNDARY CONTENT_TYPE_RE = re.compile(r'.*; charset=([\w\d-]+);?') # Structured suffix spec: https://tools.ietf.org/html/rfc6838#section-4.2.8 JSON_CONTENT_TYPE_RE = re.compile(r'^application\/(.+\+)?json') class RedirectCycleError(Exception): """The test client has been asked to follow a redirect loop.""" def __init__(self, message, last_response): super().__init__(message) self.last_response = last_response self.redirect_chain = last_response.redirect_chain class FakePayload: """ A wrapper around BytesIO that restricts what can be read since data from the network can't be seeked and cannot be read outside of its content length. This makes sure that views can't do anything under the test client that wouldn't work in real life. """ def __init__(self, content=None): self.__content = BytesIO() self.__len = 0 self.read_started = False if content is not None: self.write(content) def __len__(self): return self.__len def read(self, num_bytes=None): if not self.read_started: self.__content.seek(0) self.read_started = True if num_bytes is None: num_bytes = self.__len or 0 assert self.__len >= num_bytes, "Cannot read more than the available bytes from the HTTP incoming data." content = self.__content.read(num_bytes) self.__len -= num_bytes return content def write(self, content): if self.read_started: raise ValueError("Unable to write a payload after it's been read") content = force_bytes(content) self.__content.write(content) self.__len += len(content) def closing_iterator_wrapper(iterable, close): try: yield from iterable finally: request_finished.disconnect(close_old_connections) close() # will fire request_finished request_finished.connect(close_old_connections) def conditional_content_removal(request, response): """ Simulate the behavior of most Web servers by removing the content of responses for HEAD requests, 1xx, 204, and 304 responses. Ensure compliance with RFC 7230, section 3.3.3. """ if 100 <= response.status_code < 200 or response.status_code in (204, 304): if response.streaming: response.streaming_content = [] else: response.content = b'' if request.method == 'HEAD': if response.streaming: response.streaming_content = [] else: response.content = b'' return response class ClientHandler(BaseHandler): """ A HTTP Handler that can be used for testing purposes. 
Use the WSGI interface to compose requests, but return the raw HttpResponse object with the originating WSGIRequest attached to its ``wsgi_request`` attribute. """ def __init__(self, enforce_csrf_checks=True, *args, **kwargs): self.enforce_csrf_checks = enforce_csrf_checks super().__init__(*args, **kwargs) def __call__(self, environ): # Set up middleware if needed. We couldn't do this earlier, because # settings weren't available. if self._middleware_chain is None: self.load_middleware() request_started.disconnect(close_old_connections) request_started.send(sender=self.__class__, environ=environ) request_started.connect(close_old_connections) request = WSGIRequest(environ) # sneaky little hack so that we can easily get round # CsrfViewMiddleware. This makes life easier, and is probably # required for backwards compatibility with external tests against # admin views. request._dont_enforce_csrf_checks = not self.enforce_csrf_checks # Request goes through middleware. response = self.get_response(request) # Simulate behaviors of most Web servers. conditional_content_removal(request, response) # Attach the originating request to the response so that it could be # later retrieved. response.wsgi_request = request # Emulate a WSGI server by calling the close method on completion. if response.streaming: response.streaming_content = closing_iterator_wrapper( response.streaming_content, response.close) else: request_finished.disconnect(close_old_connections) response.close() # will fire request_finished request_finished.connect(close_old_connections) return response def store_rendered_templates(store, signal, sender, template, context, **kwargs): """ Store templates and contexts that are rendered. The context is copied so that it is an accurate representation at the time of rendering. """ store.setdefault('templates', []).append(template) if 'context' not in store: store['context'] = ContextList() store['context'].append(copy(context)) def encode_multipart(boundary, data): """ Encode multipart POST data from a dictionary of form values. The key will be used as the form data name; the value will be transmitted as content. If the value is a file, the contents of the file will be sent as an application/octet-stream; otherwise, str(value) will be sent. """ lines = [] def to_bytes(s): return force_bytes(s, settings.DEFAULT_CHARSET) # Not by any means perfect, but good enough for our purposes. def is_file(thing): return hasattr(thing, "read") and callable(thing.read) # Each bit of the multipart form data could be either a form value or a # file, or a *list* of form values and/or files. Remember that HTTP field # names can be duplicated! for (key, value) in data.items(): if value is None: raise TypeError( 'Cannot encode None as POST data. Did you mean to pass an ' 'empty string or omit the value?' ) elif is_file(value): lines.extend(encode_file(boundary, key, value)) elif not isinstance(value, str) and is_iterable(value): for item in value: if is_file(item): lines.extend(encode_file(boundary, key, item)) else: lines.extend(to_bytes(val) for val in [ '--%s' % boundary, 'Content-Disposition: form-data; name="%s"' % key, '', item ]) else: lines.extend(to_bytes(val) for val in [ '--%s' % boundary, 'Content-Disposition: form-data; name="%s"' % key, '', value ]) lines.extend([ to_bytes('--%s--' % boundary), b'', ]) return b'\r\n'.join(lines) def encode_file(boundary, key, file): def to_bytes(s): return force_bytes(s, settings.DEFAULT_CHARSET) # file.name might not be a string. 
For example, it's an int for # tempfile.TemporaryFile(). file_has_string_name = hasattr(file, 'name') and isinstance(file.name, str) filename = os.path.basename(file.name) if file_has_string_name else '' if hasattr(file, 'content_type'): content_type = file.content_type elif filename: content_type = mimetypes.guess_type(filename)[0] else: content_type = None if content_type is None: content_type = 'application/octet-stream' filename = filename or key return [ to_bytes('--%s' % boundary), to_bytes('Content-Disposition: form-data; name="%s"; filename="%s"' % (key, filename)), to_bytes('Content-Type: %s' % content_type), b'', to_bytes(file.read()) ] class RequestFactory: """ Class that lets you create mock Request objects for use in testing. Usage: rf = RequestFactory() get_request = rf.get('/hello/') post_request = rf.post('/submit/', {'foo': 'bar'}) Once you have a request object you can pass it to any view function, just as if that view had been hooked up using a URLconf. """ def __init__(self, *, json_encoder=DjangoJSONEncoder, **defaults): self.json_encoder = json_encoder self.defaults = defaults self.cookies = SimpleCookie() self.errors = BytesIO() def _base_environ(self, **request): """ The base environment for a request. """ # This is a minimal valid WSGI environ dictionary, plus: # - HTTP_COOKIE: for cookie support, # - REMOTE_ADDR: often useful, see #8551. # See https://www.python.org/dev/peps/pep-3333/#environ-variables return { 'HTTP_COOKIE': '; '.join(sorted( '%s=%s' % (morsel.key, morsel.coded_value) for morsel in self.cookies.values() )), 'PATH_INFO': '/', 'REMOTE_ADDR': '127.0.0.1', 'REQUEST_METHOD': 'GET', 'SCRIPT_NAME': '', 'SERVER_NAME': 'testserver', 'SERVER_PORT': '80', 'SERVER_PROTOCOL': 'HTTP/1.1', 'wsgi.version': (1, 0), 'wsgi.url_scheme': 'http', 'wsgi.input': FakePayload(b''), 'wsgi.errors': self.errors, 'wsgi.multiprocess': True, 'wsgi.multithread': False, 'wsgi.run_once': False, **self.defaults, **request, } def request(self, **request): "Construct a generic request object." return WSGIRequest(self._base_environ(**request)) def _encode_data(self, data, content_type): if content_type is MULTIPART_CONTENT: return encode_multipart(BOUNDARY, data) else: # Encode the content so that the byte representation is correct. match = CONTENT_TYPE_RE.match(content_type) if match: charset = match.group(1) else: charset = settings.DEFAULT_CHARSET return force_bytes(data, encoding=charset) def _encode_json(self, data, content_type): """ Return encoded JSON if data is a dict, list, or tuple and content_type is application/json. """ should_encode = JSON_CONTENT_TYPE_RE.match(content_type) and isinstance(data, (dict, list, tuple)) return json.dumps(data, cls=self.json_encoder) if should_encode else data def _get_path(self, parsed): path = parsed.path # If there are parameters, add them if parsed.params: path += ";" + parsed.params path = unquote_to_bytes(path) # Replace the behavior where non-ASCII values in the WSGI environ are # arbitrarily decoded with ISO-8859-1. # Refs comment in `get_bytes_from_wsgi()`. 
return path.decode('iso-8859-1') def get(self, path, data=None, secure=False, **extra): """Construct a GET request.""" data = {} if data is None else data return self.generic('GET', path, secure=secure, **{ 'QUERY_STRING': urlencode(data, doseq=True), **extra, }) def post(self, path, data=None, content_type=MULTIPART_CONTENT, secure=False, **extra): """Construct a POST request.""" data = self._encode_json({} if data is None else data, content_type) post_data = self._encode_data(data, content_type) return self.generic('POST', path, post_data, content_type, secure=secure, **extra) def head(self, path, data=None, secure=False, **extra): """Construct a HEAD request.""" data = {} if data is None else data return self.generic('HEAD', path, secure=secure, **{ 'QUERY_STRING': urlencode(data, doseq=True), **extra, }) def trace(self, path, secure=False, **extra): """Construct a TRACE request.""" return self.generic('TRACE', path, secure=secure, **extra) def options(self, path, data='', content_type='application/octet-stream', secure=False, **extra): "Construct an OPTIONS request." return self.generic('OPTIONS', path, data, content_type, secure=secure, **extra) def put(self, path, data='', content_type='application/octet-stream', secure=False, **extra): """Construct a PUT request.""" data = self._encode_json(data, content_type) return self.generic('PUT', path, data, content_type, secure=secure, **extra) def patch(self, path, data='', content_type='application/octet-stream', secure=False, **extra): """Construct a PATCH request.""" data = self._encode_json(data, content_type) return self.generic('PATCH', path, data, content_type, secure=secure, **extra) def delete(self, path, data='', content_type='application/octet-stream', secure=False, **extra): """Construct a DELETE request.""" data = self._encode_json(data, content_type) return self.generic('DELETE', path, data, content_type, secure=secure, **extra) def generic(self, method, path, data='', content_type='application/octet-stream', secure=False, **extra): """Construct an arbitrary HTTP request.""" parsed = urlparse(str(path)) # path can be lazy data = force_bytes(data, settings.DEFAULT_CHARSET) r = { 'PATH_INFO': self._get_path(parsed), 'REQUEST_METHOD': method, 'SERVER_PORT': '443' if secure else '80', 'wsgi.url_scheme': 'https' if secure else 'http', } if data: r.update({ 'CONTENT_LENGTH': str(len(data)), 'CONTENT_TYPE': content_type, 'wsgi.input': FakePayload(data), }) r.update(extra) # If QUERY_STRING is absent or empty, we want to extract it from the URL. if not r.get('QUERY_STRING'): # WSGI requires latin-1 encoded strings. See get_path_info(). query_string = parsed[4].encode().decode('iso-8859-1') r['QUERY_STRING'] = query_string return self.request(**r) class Client(RequestFactory): """ A class that can act as a client for testing purposes. It allows the user to compose GET and POST requests, and obtain the response that the server gave to those requests. The server Response objects are annotated with the details of the contexts and templates that were rendered during the process of serving the request. Client objects are stateful - they will retain cookie (and thus session) details for the lifetime of the Client instance. This is not intended as a replacement for Twill/Selenium or the like - it is here to allow testing against the contexts and templates produced by a view, rather than the HTML rendered to the end-user. 
""" def __init__(self, enforce_csrf_checks=False, raise_request_exception=True, **defaults): super().__init__(**defaults) self.handler = ClientHandler(enforce_csrf_checks) self.raise_request_exception = raise_request_exception self.exc_info = None def store_exc_info(self, **kwargs): """Store exceptions when they are generated by a view.""" self.exc_info = sys.exc_info() @property def session(self): """Return the current session variables.""" engine = import_module(settings.SESSION_ENGINE) cookie = self.cookies.get(settings.SESSION_COOKIE_NAME) if cookie: return engine.SessionStore(cookie.value) session = engine.SessionStore() session.save() self.cookies[settings.SESSION_COOKIE_NAME] = session.session_key return session def request(self, **request): """ The master request method. Compose the environment dictionary and pass to the handler, return the result of the handler. Assume defaults for the query environment, which can be overridden using the arguments to the request. """ environ = self._base_environ(**request) # Curry a data dictionary into an instance of the template renderer # callback function. data = {} on_template_render = partial(store_rendered_templates, data) signal_uid = "template-render-%s" % id(request) signals.template_rendered.connect(on_template_render, dispatch_uid=signal_uid) # Capture exceptions created by the handler. exception_uid = "request-exception-%s" % id(request) got_request_exception.connect(self.store_exc_info, dispatch_uid=exception_uid) try: response = self.handler(environ) finally: signals.template_rendered.disconnect(dispatch_uid=signal_uid) got_request_exception.disconnect(dispatch_uid=exception_uid) # Look for a signaled exception, clear the current context exception # data, then re-raise the signaled exception. Also clear the signaled # exception from the local cache. response.exc_info = self.exc_info if self.exc_info: _, exc_value, _ = self.exc_info self.exc_info = None if self.raise_request_exception: raise exc_value # Save the client and request that stimulated the response. response.client = self response.request = request # Add any rendered template detail to the response. response.templates = data.get('templates', []) response.context = data.get('context') response.json = partial(self._parse_json, response) # Attach the ResolverMatch instance to the response. response.resolver_match = SimpleLazyObject(lambda: resolve(request['PATH_INFO'])) # Flatten a single context. Not really necessary anymore thanks to the # __getattr__ flattening in ContextList, but has some edge case # backwards compatibility implications. if response.context and len(response.context) == 1: response.context = response.context[0] # Update persistent cookie data. 
if response.cookies: self.cookies.update(response.cookies) return response def get(self, path, data=None, follow=False, secure=False, **extra): """Request a response from the server using GET.""" response = super().get(path, data=data, secure=secure, **extra) if follow: response = self._handle_redirects(response, data=data, **extra) return response def post(self, path, data=None, content_type=MULTIPART_CONTENT, follow=False, secure=False, **extra): """Request a response from the server using POST.""" response = super().post(path, data=data, content_type=content_type, secure=secure, **extra) if follow: response = self._handle_redirects(response, data=data, content_type=content_type, **extra) return response def head(self, path, data=None, follow=False, secure=False, **extra): """Request a response from the server using HEAD.""" response = super().head(path, data=data, secure=secure, **extra) if follow: response = self._handle_redirects(response, data=data, **extra) return response def options(self, path, data='', content_type='application/octet-stream', follow=False, secure=False, **extra): """Request a response from the server using OPTIONS.""" response = super().options(path, data=data, content_type=content_type, secure=secure, **extra) if follow: response = self._handle_redirects(response, data=data, content_type=content_type, **extra) return response def put(self, path, data='', content_type='application/octet-stream', follow=False, secure=False, **extra): """Send a resource to the server using PUT.""" response = super().put(path, data=data, content_type=content_type, secure=secure, **extra) if follow: response = self._handle_redirects(response, data=data, content_type=content_type, **extra) return response def patch(self, path, data='', content_type='application/octet-stream', follow=False, secure=False, **extra): """Send a resource to the server using PATCH.""" response = super().patch(path, data=data, content_type=content_type, secure=secure, **extra) if follow: response = self._handle_redirects(response, data=data, content_type=content_type, **extra) return response def delete(self, path, data='', content_type='application/octet-stream', follow=False, secure=False, **extra): """Send a DELETE request to the server.""" response = super().delete(path, data=data, content_type=content_type, secure=secure, **extra) if follow: response = self._handle_redirects(response, data=data, content_type=content_type, **extra) return response def trace(self, path, data='', follow=False, secure=False, **extra): """Send a TRACE request to the server.""" response = super().trace(path, data=data, secure=secure, **extra) if follow: response = self._handle_redirects(response, data=data, **extra) return response def login(self, **credentials): """ Set the Factory to appear as if it has successfully logged into a site. Return True if login is possible; False if the provided credentials are incorrect. 
""" from django.contrib.auth import authenticate user = authenticate(**credentials) if user: self._login(user) return True else: return False def force_login(self, user, backend=None): def get_backend(): from django.contrib.auth import load_backend for backend_path in settings.AUTHENTICATION_BACKENDS: backend = load_backend(backend_path) if hasattr(backend, 'get_user'): return backend_path if backend is None: backend = get_backend() user.backend = backend self._login(user, backend) def _login(self, user, backend=None): from django.contrib.auth import login engine = import_module(settings.SESSION_ENGINE) # Create a fake request to store login details. request = HttpRequest() if self.session: request.session = self.session else: request.session = engine.SessionStore() login(request, user, backend) # Save the session values. request.session.save() # Set the cookie to represent the session. session_cookie = settings.SESSION_COOKIE_NAME self.cookies[session_cookie] = request.session.session_key cookie_data = { 'max-age': None, 'path': '/', 'domain': settings.SESSION_COOKIE_DOMAIN, 'secure': settings.SESSION_COOKIE_SECURE or None, 'expires': None, } self.cookies[session_cookie].update(cookie_data) def logout(self): """Log out the user by removing the cookies and session object.""" from django.contrib.auth import get_user, logout request = HttpRequest() engine = import_module(settings.SESSION_ENGINE) if self.session: request.session = self.session request.user = get_user(request) else: request.session = engine.SessionStore() logout(request) self.cookies = SimpleCookie() def _parse_json(self, response, **extra): if not hasattr(response, '_json'): if not JSON_CONTENT_TYPE_RE.match(response.get('Content-Type')): raise ValueError( 'Content-Type header is "{0}", not "application/json"' .format(response.get('Content-Type')) ) response._json = json.loads(response.content.decode(response.charset), **extra) return response._json def _handle_redirects(self, response, data='', content_type='', **extra): """ Follow any redirects by requesting responses from the server using GET. """ response.redirect_chain = [] redirect_status_codes = ( HTTPStatus.MOVED_PERMANENTLY, HTTPStatus.FOUND, HTTPStatus.SEE_OTHER, HTTPStatus.TEMPORARY_REDIRECT, HTTPStatus.PERMANENT_REDIRECT, ) while response.status_code in redirect_status_codes: response_url = response.url redirect_chain = response.redirect_chain redirect_chain.append((response_url, response.status_code)) url = urlsplit(response_url) if url.scheme: extra['wsgi.url_scheme'] = url.scheme if url.hostname: extra['SERVER_NAME'] = url.hostname if url.port: extra['SERVER_PORT'] = str(url.port) # Prepend the request path to handle relative path redirects path = url.path if not path.startswith('/'): path = urljoin(response.request['PATH_INFO'], path) if response.status_code in (HTTPStatus.TEMPORARY_REDIRECT, HTTPStatus.PERMANENT_REDIRECT): # Preserve request method post-redirect for 307/308 responses. request_method = getattr(self, response.request['REQUEST_METHOD'].lower()) else: request_method = self.get data = QueryDict(url.query) content_type = None response = request_method(path, data=data, content_type=content_type, follow=False, **extra) response.redirect_chain = redirect_chain if redirect_chain[-1] in redirect_chain[:-1]: # Check that we're not redirecting to somewhere we've already # been to, to prevent loops. 
raise RedirectCycleError("Redirect loop detected.", last_response=response) if len(redirect_chain) > 20: # Such a lengthy chain likely also means a loop, but one with # a growing path, changing view, or changing query argument; # 20 is the value of "network.http.redirection-limit" from Firefox. raise RedirectCycleError("Too many redirects.", last_response=response) return response
import asyncore import base64 import mimetypes import os import shutil import smtpd import sys import tempfile import threading from email import charset, message_from_binary_file, message_from_bytes from email.header import Header from email.mime.text import MIMEText from email.utils import parseaddr from io import StringIO from smtplib import SMTP, SMTPAuthenticationError, SMTPException from ssl import SSLError from django.core import mail from django.core.mail import ( EmailMessage, EmailMultiAlternatives, mail_admins, mail_managers, send_mail, send_mass_mail, ) from django.core.mail.backends import console, dummy, filebased, locmem, smtp from django.core.mail.message import BadHeaderError, sanitize_address from django.test import SimpleTestCase, override_settings from django.test.utils import requires_tz_support from django.utils.translation import gettext_lazy class HeadersCheckMixin: def assertMessageHasHeaders(self, message, headers): """ Asserts that the `message` has all `headers`. message: can be an instance of an email.Message subclass or a string with the contents of an email message. headers: should be a set of (header-name, header-value) tuples. """ if isinstance(message, bytes): message = message_from_bytes(message) msg_headers = set(message.items()) self.assertTrue(headers.issubset(msg_headers), msg='Message is missing ' 'the following headers: %s' % (headers - msg_headers),) class MailTests(HeadersCheckMixin, SimpleTestCase): """ Non-backend specific tests. """ def get_decoded_attachments(self, django_message): """ Encode the specified django.core.mail.message.EmailMessage, then decode it using Python's email.parser module and, for each attachment of the message, return a list of tuples with (filename, content, mimetype). """ msg_bytes = django_message.message().as_bytes() email_message = message_from_bytes(msg_bytes) def iter_attachments(): for i in email_message.walk(): if i.get_content_disposition() == 'attachment': filename = i.get_filename() content = i.get_payload(decode=True) mimetype = i.get_content_type() yield filename, content, mimetype return list(iter_attachments()) def test_ascii(self): email = EmailMessage('Subject', 'Content', '[email protected]', ['[email protected]']) message = email.message() self.assertEqual(message['Subject'], 'Subject') self.assertEqual(message.get_payload(), 'Content') self.assertEqual(message['From'], '[email protected]') self.assertEqual(message['To'], '[email protected]') def test_multiple_recipients(self): email = EmailMessage('Subject', 'Content', '[email protected]', ['[email protected]', '[email protected]']) message = email.message() self.assertEqual(message['Subject'], 'Subject') self.assertEqual(message.get_payload(), 'Content') self.assertEqual(message['From'], '[email protected]') self.assertEqual(message['To'], '[email protected], [email protected]') def test_header_omitted_for_no_to_recipients(self): message = EmailMessage('Subject', 'Content', '[email protected]', cc=['[email protected]']).message() self.assertNotIn('To', message) def test_recipients_with_empty_strings(self): """ Empty strings in various recipient arguments are always stripped off the final recipient list. 
""" email = EmailMessage( 'Subject', 'Content', '[email protected]', ['[email protected]', ''], cc=['[email protected]', ''], bcc=['', '[email protected]'], reply_to=['', None], ) self.assertEqual( email.recipients(), ['[email protected]', '[email protected]', '[email protected]'] ) def test_cc(self): """Regression test for #7722""" email = EmailMessage('Subject', 'Content', '[email protected]', ['[email protected]'], cc=['[email protected]']) message = email.message() self.assertEqual(message['Cc'], '[email protected]') self.assertEqual(email.recipients(), ['[email protected]', '[email protected]']) # Test multiple CC with multiple To email = EmailMessage( 'Subject', 'Content', '[email protected]', ['[email protected]', '[email protected]'], cc=['[email protected]', '[email protected]'] ) message = email.message() self.assertEqual(message['Cc'], '[email protected], [email protected]') self.assertEqual( email.recipients(), ['[email protected]', '[email protected]', '[email protected]', '[email protected]'] ) # Testing with Bcc email = EmailMessage( 'Subject', 'Content', '[email protected]', ['[email protected]', '[email protected]'], cc=['[email protected]', '[email protected]'], bcc=['[email protected]'] ) message = email.message() self.assertEqual(message['Cc'], '[email protected], [email protected]') self.assertEqual( email.recipients(), ['[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]'] ) def test_cc_headers(self): message = EmailMessage( 'Subject', 'Content', '[email protected]', ['[email protected]'], cc=['[email protected]'], headers={'Cc': '[email protected]'}, ).message() self.assertEqual(message['Cc'], '[email protected]') def test_cc_in_headers_only(self): message = EmailMessage( 'Subject', 'Content', '[email protected]', ['[email protected]'], headers={'Cc': '[email protected]'}, ).message() self.assertEqual(message['Cc'], '[email protected]') def test_reply_to(self): email = EmailMessage( 'Subject', 'Content', '[email protected]', ['[email protected]'], reply_to=['[email protected]'], ) message = email.message() self.assertEqual(message['Reply-To'], '[email protected]') email = EmailMessage( 'Subject', 'Content', '[email protected]', ['[email protected]'], reply_to=['[email protected]', '[email protected]'] ) message = email.message() self.assertEqual(message['Reply-To'], '[email protected], [email protected]') def test_recipients_as_tuple(self): email = EmailMessage( 'Subject', 'Content', '[email protected]', ('[email protected]', '[email protected]'), cc=('[email protected]', '[email protected]'), bcc=('[email protected]',) ) message = email.message() self.assertEqual(message['Cc'], '[email protected], [email protected]') self.assertEqual( email.recipients(), ['[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]'] ) def test_recipients_as_string(self): with self.assertRaisesMessage(TypeError, '"to" argument must be a list or tuple'): EmailMessage(to='[email protected]') with self.assertRaisesMessage(TypeError, '"cc" argument must be a list or tuple'): EmailMessage(cc='[email protected]') with self.assertRaisesMessage(TypeError, '"bcc" argument must be a list or tuple'): EmailMessage(bcc='[email protected]') with self.assertRaisesMessage(TypeError, '"reply_to" argument must be a list or tuple'): EmailMessage(reply_to='[email protected]') def test_header_injection(self): email = EmailMessage('Subject\nInjection Test', 'Content', '[email protected]', ['[email protected]']) 
with self.assertRaises(BadHeaderError): email.message() email = EmailMessage( gettext_lazy('Subject\nInjection Test'), 'Content', '[email protected]', ['[email protected]'] ) with self.assertRaises(BadHeaderError): email.message() def test_space_continuation(self): """ Test for space continuation character in long (ASCII) subject headers (#7747) """ email = EmailMessage( 'Long subject lines that get wrapped should contain a space ' 'continuation character to get expected behavior in Outlook and Thunderbird', 'Content', '[email protected]', ['[email protected]'] ) message = email.message() self.assertEqual( message['Subject'].encode(), b'Long subject lines that get wrapped should contain a space continuation\n' b' character to get expected behavior in Outlook and Thunderbird' ) def test_message_header_overrides(self): """ Specifying dates or message-ids in the extra headers overrides the default values (#9233) """ headers = {"date": "Fri, 09 Nov 2001 01:08:47 -0000", "Message-ID": "foo"} email = EmailMessage('subject', 'content', '[email protected]', ['[email protected]'], headers=headers) self.assertMessageHasHeaders(email.message(), { ('Content-Transfer-Encoding', '7bit'), ('Content-Type', 'text/plain; charset="utf-8"'), ('From', '[email protected]'), ('MIME-Version', '1.0'), ('Message-ID', 'foo'), ('Subject', 'subject'), ('To', '[email protected]'), ('date', 'Fri, 09 Nov 2001 01:08:47 -0000'), }) def test_from_header(self): """ Make sure we can manually set the From header (#9214) """ email = EmailMessage( 'Subject', 'Content', '[email protected]', ['[email protected]'], headers={'From': '[email protected]'}, ) message = email.message() self.assertEqual(message['From'], '[email protected]') def test_to_header(self): """ Make sure we can manually set the To header (#17444) """ email = EmailMessage('Subject', 'Content', '[email protected]', ['[email protected]', '[email protected]'], headers={'To': '[email protected]'}) message = email.message() self.assertEqual(message['To'], '[email protected]') self.assertEqual(email.to, ['[email protected]', '[email protected]']) # If we don't set the To header manually, it should default to the `to` argument to the constructor email = EmailMessage('Subject', 'Content', '[email protected]', ['[email protected]', '[email protected]']) message = email.message() self.assertEqual(message['To'], '[email protected], [email protected]') self.assertEqual(email.to, ['[email protected]', '[email protected]']) def test_to_in_headers_only(self): message = EmailMessage( 'Subject', 'Content', '[email protected]', headers={'To': '[email protected]'}, ).message() self.assertEqual(message['To'], '[email protected]') def test_reply_to_header(self): """ Specifying 'Reply-To' in headers should override reply_to. 
""" email = EmailMessage( 'Subject', 'Content', '[email protected]', ['[email protected]'], reply_to=['[email protected]'], headers={'Reply-To': '[email protected]'}, ) message = email.message() self.assertEqual(message['Reply-To'], '[email protected]') def test_reply_to_in_headers_only(self): message = EmailMessage( 'Subject', 'Content', '[email protected]', ['[email protected]'], headers={'Reply-To': '[email protected]'}, ).message() self.assertEqual(message['Reply-To'], '[email protected]') def test_multiple_message_call(self): """ Regression for #13259 - Make sure that headers are not changed when calling EmailMessage.message() """ email = EmailMessage( 'Subject', 'Content', '[email protected]', ['[email protected]'], headers={'From': '[email protected]'}, ) message = email.message() self.assertEqual(message['From'], '[email protected]') message = email.message() self.assertEqual(message['From'], '[email protected]') def test_unicode_address_header(self): """ Regression for #11144 - When a to/from/cc header contains unicode, make sure the email addresses are parsed correctly (especially with regards to commas) """ email = EmailMessage( 'Subject', 'Content', '[email protected]', ['"Firstname Sürname" <[email protected]>', '[email protected]'], ) self.assertEqual( email.message()['To'], '=?utf-8?q?Firstname_S=C3=BCrname?= <[email protected]>, [email protected]' ) email = EmailMessage( 'Subject', 'Content', '[email protected]', ['"Sürname, Firstname" <[email protected]>', '[email protected]'], ) self.assertEqual( email.message()['To'], '=?utf-8?q?S=C3=BCrname=2C_Firstname?= <[email protected]>, [email protected]' ) def test_unicode_headers(self): email = EmailMessage( 'Gżegżółka', 'Content', '[email protected]', ['[email protected]'], headers={ 'Sender': '"Firstname Sürname" <[email protected]>', 'Comments': 'My Sürname is non-ASCII', }, ) message = email.message() self.assertEqual(message['Subject'], '=?utf-8?b?R8W8ZWfFvMOzxYJrYQ==?=') self.assertEqual(message['Sender'], '=?utf-8?q?Firstname_S=C3=BCrname?= <[email protected]>') self.assertEqual(message['Comments'], '=?utf-8?q?My_S=C3=BCrname_is_non-ASCII?=') def test_safe_mime_multipart(self): """ Make sure headers can be set with a different encoding than utf-8 in SafeMIMEMultipart as well """ headers = {"Date": "Fri, 09 Nov 2001 01:08:47 -0000", "Message-ID": "foo"} from_email, to = '[email protected]', '"Sürname, Firstname" <[email protected]>' text_content = 'This is an important message.' html_content = '<p>This is an <strong>important</strong> message.</p>' msg = EmailMultiAlternatives('Message from Firstname Sürname', text_content, from_email, [to], headers=headers) msg.attach_alternative(html_content, "text/html") msg.encoding = 'iso-8859-1' self.assertEqual(msg.message()['To'], '=?iso-8859-1?q?S=FCrname=2C_Firstname?= <[email protected]>') self.assertEqual(msg.message()['Subject'], '=?iso-8859-1?q?Message_from_Firstname_S=FCrname?=') def test_safe_mime_multipart_with_attachments(self): """ EmailMultiAlternatives includes alternatives if the body is empty and it has attachments. 
""" msg = EmailMultiAlternatives(body='') html_content = '<p>This is <strong>html</strong></p>' msg.attach_alternative(html_content, 'text/html') msg.attach('example.txt', 'Text file content', 'text/plain') self.assertIn(html_content, msg.message().as_string()) def test_none_body(self): msg = EmailMessage('subject', None, '[email protected]', ['[email protected]']) self.assertEqual(msg.body, '') self.assertEqual(msg.message().get_payload(), '') def test_encoding(self): """ Regression for #12791 - Encode body correctly with other encodings than utf-8 """ email = EmailMessage('Subject', 'Firstname Sürname is a great guy.', '[email protected]', ['[email protected]']) email.encoding = 'iso-8859-1' message = email.message() self.assertMessageHasHeaders(message, { ('MIME-Version', '1.0'), ('Content-Type', 'text/plain; charset="iso-8859-1"'), ('Content-Transfer-Encoding', 'quoted-printable'), ('Subject', 'Subject'), ('From', '[email protected]'), ('To', '[email protected]')}) self.assertEqual(message.get_payload(), 'Firstname S=FCrname is a great guy.') # Make sure MIME attachments also works correctly with other encodings than utf-8 text_content = 'Firstname Sürname is a great guy.' html_content = '<p>Firstname Sürname is a <strong>great</strong> guy.</p>' msg = EmailMultiAlternatives('Subject', text_content, '[email protected]', ['[email protected]']) msg.encoding = 'iso-8859-1' msg.attach_alternative(html_content, "text/html") payload0 = msg.message().get_payload(0) self.assertMessageHasHeaders(payload0, { ('MIME-Version', '1.0'), ('Content-Type', 'text/plain; charset="iso-8859-1"'), ('Content-Transfer-Encoding', 'quoted-printable')}) self.assertTrue(payload0.as_bytes().endswith(b'\n\nFirstname S=FCrname is a great guy.')) payload1 = msg.message().get_payload(1) self.assertMessageHasHeaders(payload1, { ('MIME-Version', '1.0'), ('Content-Type', 'text/html; charset="iso-8859-1"'), ('Content-Transfer-Encoding', 'quoted-printable')}) self.assertTrue( payload1.as_bytes().endswith(b'\n\n<p>Firstname S=FCrname is a <strong>great</strong> guy.</p>') ) def test_attachments(self): """Regression test for #9367""" headers = {"Date": "Fri, 09 Nov 2001 01:08:47 -0000", "Message-ID": "foo"} subject, from_email, to = 'hello', '[email protected]', '[email protected]' text_content = 'This is an important message.' 
html_content = '<p>This is an <strong>important</strong> message.</p>' msg = EmailMultiAlternatives(subject, text_content, from_email, [to], headers=headers) msg.attach_alternative(html_content, "text/html") msg.attach("an attachment.pdf", b"%PDF-1.4.%...", mimetype="application/pdf") msg_bytes = msg.message().as_bytes() message = message_from_bytes(msg_bytes) self.assertTrue(message.is_multipart()) self.assertEqual(message.get_content_type(), 'multipart/mixed') self.assertEqual(message.get_default_type(), 'text/plain') payload = message.get_payload() self.assertEqual(payload[0].get_content_type(), 'multipart/alternative') self.assertEqual(payload[1].get_content_type(), 'application/pdf') def test_attachments_two_tuple(self): msg = EmailMessage(attachments=[('filename1', 'content1')]) filename, content, mimetype = self.get_decoded_attachments(msg)[0] self.assertEqual(filename, 'filename1') self.assertEqual(content, b'content1') self.assertEqual(mimetype, 'application/octet-stream') def test_attachments_MIMEText(self): txt = MIMEText('content1') msg = EmailMessage(attachments=[txt]) payload = msg.message().get_payload() self.assertEqual(payload[0], txt) def test_non_ascii_attachment_filename(self): """Regression test for #14964""" headers = {"Date": "Fri, 09 Nov 2001 01:08:47 -0000", "Message-ID": "foo"} subject, from_email, to = 'hello', '[email protected]', '[email protected]' content = 'This is the message.' msg = EmailMessage(subject, content, from_email, [to], headers=headers) # Unicode in file name msg.attach("une pièce jointe.pdf", b"%PDF-1.4.%...", mimetype="application/pdf") msg_bytes = msg.message().as_bytes() message = message_from_bytes(msg_bytes) payload = message.get_payload() self.assertEqual(payload[1].get_filename(), 'une pièce jointe.pdf') def test_attach_file(self): """ Test attaching a file against different mimetypes and make sure that a file will be attached and sent properly even if an invalid mimetype is specified. """ files = ( # filename, actual mimetype ('file.txt', 'text/plain'), ('file.png', 'image/png'), ('file_txt', None), ('file_png', None), ('file_txt.png', 'image/png'), ('file_png.txt', 'text/plain'), ('file.eml', 'message/rfc822'), ) test_mimetypes = ['text/plain', 'image/png', None] for basename, real_mimetype in files: for mimetype in test_mimetypes: email = EmailMessage('subject', 'body', '[email protected]', ['[email protected]']) self.assertEqual(mimetypes.guess_type(basename)[0], real_mimetype) self.assertEqual(email.attachments, []) file_path = os.path.join(os.path.dirname(__file__), 'attachments', basename) email.attach_file(file_path, mimetype=mimetype) self.assertEqual(len(email.attachments), 1) self.assertIn(basename, email.attachments[0]) msgs_sent_num = email.send() self.assertEqual(msgs_sent_num, 1) def test_attach_text_as_bytes(self): msg = EmailMessage('subject', 'body', '[email protected]', ['[email protected]']) msg.attach('file.txt', b'file content') sent_num = msg.send() self.assertEqual(sent_num, 1) filename, content, mimetype = self.get_decoded_attachments(msg)[0] self.assertEqual(filename, 'file.txt') self.assertEqual(content, b'file content') self.assertEqual(mimetype, 'text/plain') def test_attach_utf8_text_as_bytes(self): """ Non-ASCII characters encoded as valid UTF-8 are correctly transported and decoded. """ msg = EmailMessage('subject', 'body', '[email protected]', ['[email protected]']) msg.attach('file.txt', b'\xc3\xa4') # UTF-8 encoded a umlaut. 
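        # attach() leaves bytes that decode as valid UTF-8 untouched and keeps
        # the mimetype guessed from the filename (text/plain here); the next
        # test covers the fallback for bytes that are not valid UTF-8.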
filename, content, mimetype = self.get_decoded_attachments(msg)[0] self.assertEqual(filename, 'file.txt') self.assertEqual(content, b'\xc3\xa4') self.assertEqual(mimetype, 'text/plain') def test_attach_non_utf8_text_as_bytes(self): """ Binary data that can't be decoded as UTF-8 overrides the MIME type instead of decoding the data. """ msg = EmailMessage('subject', 'body', '[email protected]', ['[email protected]']) msg.attach('file.txt', b'\xff') # Invalid UTF-8. filename, content, mimetype = self.get_decoded_attachments(msg)[0] self.assertEqual(filename, 'file.txt') # Content should be passed through unmodified. self.assertEqual(content, b'\xff') self.assertEqual(mimetype, 'application/octet-stream') def test_dummy_backend(self): """ Make sure that dummy backends returns correct number of sent messages """ connection = dummy.EmailBackend() email = EmailMessage( 'Subject', 'Content', '[email protected]', ['[email protected]'], headers={'From': '[email protected]'}, ) self.assertEqual(connection.send_messages([email, email, email]), 3) def test_arbitrary_keyword(self): """ Make sure that get_connection() accepts arbitrary keyword that might be used with custom backends. """ c = mail.get_connection(fail_silently=True, foo='bar') self.assertTrue(c.fail_silently) def test_custom_backend(self): """Test custom backend defined in this suite.""" conn = mail.get_connection('mail.custombackend.EmailBackend') self.assertTrue(hasattr(conn, 'test_outbox')) email = EmailMessage( 'Subject', 'Content', '[email protected]', ['[email protected]'], headers={'From': '[email protected]'}, ) conn.send_messages([email]) self.assertEqual(len(conn.test_outbox), 1) def test_backend_arg(self): """Test backend argument of mail.get_connection()""" self.assertIsInstance(mail.get_connection('django.core.mail.backends.smtp.EmailBackend'), smtp.EmailBackend) self.assertIsInstance( mail.get_connection('django.core.mail.backends.locmem.EmailBackend'), locmem.EmailBackend ) self.assertIsInstance(mail.get_connection('django.core.mail.backends.dummy.EmailBackend'), dummy.EmailBackend) self.assertIsInstance( mail.get_connection('django.core.mail.backends.console.EmailBackend'), console.EmailBackend ) with tempfile.TemporaryDirectory() as tmp_dir: self.assertIsInstance( mail.get_connection('django.core.mail.backends.filebased.EmailBackend', file_path=tmp_dir), filebased.EmailBackend ) self.assertIsInstance(mail.get_connection(), locmem.EmailBackend) @override_settings( EMAIL_BACKEND='django.core.mail.backends.locmem.EmailBackend', ADMINS=[('nobody', '[email protected]')], MANAGERS=[('nobody', '[email protected]')]) def test_connection_arg(self): """Test connection argument to send_mail(), et. 
al.""" mail.outbox = [] # Send using non-default connection connection = mail.get_connection('mail.custombackend.EmailBackend') send_mail('Subject', 'Content', '[email protected]', ['[email protected]'], connection=connection) self.assertEqual(mail.outbox, []) self.assertEqual(len(connection.test_outbox), 1) self.assertEqual(connection.test_outbox[0].subject, 'Subject') connection = mail.get_connection('mail.custombackend.EmailBackend') send_mass_mail([ ('Subject1', 'Content1', '[email protected]', ['[email protected]']), ('Subject2', 'Content2', '[email protected]', ['[email protected]']), ], connection=connection) self.assertEqual(mail.outbox, []) self.assertEqual(len(connection.test_outbox), 2) self.assertEqual(connection.test_outbox[0].subject, 'Subject1') self.assertEqual(connection.test_outbox[1].subject, 'Subject2') connection = mail.get_connection('mail.custombackend.EmailBackend') mail_admins('Admin message', 'Content', connection=connection) self.assertEqual(mail.outbox, []) self.assertEqual(len(connection.test_outbox), 1) self.assertEqual(connection.test_outbox[0].subject, '[Django] Admin message') connection = mail.get_connection('mail.custombackend.EmailBackend') mail_managers('Manager message', 'Content', connection=connection) self.assertEqual(mail.outbox, []) self.assertEqual(len(connection.test_outbox), 1) self.assertEqual(connection.test_outbox[0].subject, '[Django] Manager message') def test_dont_mangle_from_in_body(self): # Regression for #13433 - Make sure that EmailMessage doesn't mangle # 'From ' in message body. email = EmailMessage( 'Subject', 'From the future', '[email protected]', ['[email protected]'], headers={'From': '[email protected]'}, ) self.assertNotIn(b'>From the future', email.message().as_bytes()) def test_dont_base64_encode(self): # Ticket #3472 # Shouldn't use Base64 encoding at all msg = EmailMessage( 'Subject', 'UTF-8 encoded body', '[email protected]', ['[email protected]'], headers={'From': '[email protected]'}, ) self.assertIn(b'Content-Transfer-Encoding: 7bit', msg.message().as_bytes()) # Ticket #11212 # Shouldn't use quoted printable, should detect it can represent content with 7 bit data msg = EmailMessage( 'Subject', 'Body with only ASCII characters.', '[email protected]', ['[email protected]'], headers={'From': '[email protected]'}, ) s = msg.message().as_bytes() self.assertIn(b'Content-Transfer-Encoding: 7bit', s) # Shouldn't use quoted printable, should detect it can represent content with 8 bit data msg = EmailMessage( 'Subject', 'Body with latin characters: àáä.', '[email protected]', ['[email protected]'], headers={'From': '[email protected]'}, ) s = msg.message().as_bytes() self.assertIn(b'Content-Transfer-Encoding: 8bit', s) s = msg.message().as_string() self.assertIn('Content-Transfer-Encoding: 8bit', s) msg = EmailMessage( 'Subject', 'Body with non latin characters: А Б В Г Д Е Ж Ѕ З И І К Л М Н О П.', '[email protected]', ['[email protected]'], headers={'From': '[email protected]'}, ) s = msg.message().as_bytes() self.assertIn(b'Content-Transfer-Encoding: 8bit', s) s = msg.message().as_string() self.assertIn('Content-Transfer-Encoding: 8bit', s) def test_dont_base64_encode_message_rfc822(self): # Ticket #18967 # Shouldn't use base64 encoding for a child EmailMessage attachment. 
# Create a child message first child_msg = EmailMessage( 'Child Subject', 'Some body of child message', '[email protected]', ['[email protected]'], headers={'From': '[email protected]'}, ) child_s = child_msg.message().as_string() # Now create a parent parent_msg = EmailMessage( 'Parent Subject', 'Some parent body', '[email protected]', ['[email protected]'], headers={'From': '[email protected]'}, ) # Attach to parent as a string parent_msg.attach(content=child_s, mimetype='message/rfc822') parent_s = parent_msg.message().as_string() # The child message header is not base64 encoded self.assertIn('Child Subject', parent_s) # Feature test: try attaching email.Message object directly to the mail. parent_msg = EmailMessage( 'Parent Subject', 'Some parent body', '[email protected]', ['[email protected]'], headers={'From': '[email protected]'}, ) parent_msg.attach(content=child_msg.message(), mimetype='message/rfc822') parent_s = parent_msg.message().as_string() # The child message header is not base64 encoded self.assertIn('Child Subject', parent_s) # Feature test: try attaching Django's EmailMessage object directly to the mail. parent_msg = EmailMessage( 'Parent Subject', 'Some parent body', '[email protected]', ['[email protected]'], headers={'From': '[email protected]'}, ) parent_msg.attach(content=child_msg, mimetype='message/rfc822') parent_s = parent_msg.message().as_string() # The child message header is not base64 encoded self.assertIn('Child Subject', parent_s) def test_custom_utf8_encoding(self): """A UTF-8 charset with a custom body encoding is respected.""" body = 'Body with latin characters: àáä.' msg = EmailMessage('Subject', body, '[email protected]', ['[email protected]']) encoding = charset.Charset('utf-8') encoding.body_encoding = charset.QP msg.encoding = encoding message = msg.message() self.assertMessageHasHeaders(message, { ('MIME-Version', '1.0'), ('Content-Type', 'text/plain; charset="utf-8"'), ('Content-Transfer-Encoding', 'quoted-printable'), }) self.assertEqual(message.get_payload(), encoding.body_encode(body)) def test_sanitize_address(self): """Email addresses are properly sanitized.""" for email_address, encoding, expected_result in ( # ASCII addresses. ('[email protected]', 'ascii', '[email protected]'), ('[email protected]', 'utf-8', '[email protected]'), (('A name', '[email protected]'), 'ascii', 'A name <[email protected]>'), ( ('A name', '[email protected]'), 'utf-8', '=?utf-8?q?A_name?= <[email protected]>', ), # ASCII addresses with display names. ('A name <[email protected]>', 'ascii', 'A name <[email protected]>'), ('A name <[email protected]>', 'utf-8', '=?utf-8?q?A_name?= <[email protected]>'), ('"A name" <[email protected]>', 'ascii', 'A name <[email protected]>'), ('"A name" <[email protected]>', 'utf-8', '=?utf-8?q?A_name?= <[email protected]>'), # Unicode addresses (supported per RFC-6532). ('tó@example.com', 'utf-8', '[email protected]'), ('to@éxample.com', 'utf-8', '[email protected]'), ( ('Tó Example', 'tó@example.com'), 'utf-8', '=?utf-8?q?T=C3=B3_Example?= <[email protected]>', ), # Unicode addresses with display names. ( 'Tó Example <tó@example.com>', 'utf-8', '=?utf-8?q?T=C3=B3_Example?= <[email protected]>', ), ('To Example <to@éxample.com>', 'ascii', 'To Example <[email protected]>'), ( 'To Example <to@éxample.com>', 'utf-8', '=?utf-8?q?To_Example?= <[email protected]>', ), # Addresses with two @ signs. 
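            # (The local part is quoted, so the first @ sign is not the
            # local-part/domain separator.)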
('"[email protected]"@example.com', 'utf-8', r'"[email protected]"@example.com'), ( '"[email protected]" <[email protected]>', 'utf-8', '=?utf-8?q?to=40other=2Ecom?= <[email protected]>', ), ): with self.subTest(email_address=email_address, encoding=encoding): self.assertEqual(sanitize_address(email_address, encoding), expected_result) @requires_tz_support class MailTimeZoneTests(SimpleTestCase): @override_settings(EMAIL_USE_LOCALTIME=False, USE_TZ=True, TIME_ZONE='Africa/Algiers') def test_date_header_utc(self): """ EMAIL_USE_LOCALTIME=False creates a datetime in UTC. """ email = EmailMessage('Subject', 'Body', '[email protected]', ['[email protected]']) self.assertTrue(email.message()['Date'].endswith('-0000')) @override_settings(EMAIL_USE_LOCALTIME=True, USE_TZ=True, TIME_ZONE='Africa/Algiers') def test_date_header_localtime(self): """ EMAIL_USE_LOCALTIME=True creates a datetime in the local time zone. """ email = EmailMessage('Subject', 'Body', '[email protected]', ['[email protected]']) self.assertTrue(email.message()['Date'].endswith('+0100')) # Africa/Algiers is UTC+1 class PythonGlobalState(SimpleTestCase): """ Tests for #12422 -- Django smarts (#2472/#11212) with charset of utf-8 text parts shouldn't pollute global email Python package charset registry when django.mail.message is imported. """ def test_utf8(self): txt = MIMEText('UTF-8 encoded body', 'plain', 'utf-8') self.assertIn('Content-Transfer-Encoding: base64', txt.as_string()) def test_7bit(self): txt = MIMEText('Body with only ASCII characters.', 'plain', 'utf-8') self.assertIn('Content-Transfer-Encoding: base64', txt.as_string()) def test_8bit_latin(self): txt = MIMEText('Body with latin characters: àáä.', 'plain', 'utf-8') self.assertIn('Content-Transfer-Encoding: base64', txt.as_string()) def test_8bit_non_latin(self): txt = MIMEText('Body with non latin characters: А Б В Г Д Е Ж Ѕ З И І К Л М Н О П.', 'plain', 'utf-8') self.assertIn('Content-Transfer-Encoding: base64', txt.as_string()) class BaseEmailBackendTests(HeadersCheckMixin): email_backend = None def setUp(self): self.settings_override = override_settings(EMAIL_BACKEND=self.email_backend) self.settings_override.enable() def tearDown(self): self.settings_override.disable() def assertStartsWith(self, first, second): if not first.startswith(second): self.longMessage = True self.assertEqual(first[:len(second)], second, "First string doesn't start with the second.") def get_mailbox_content(self): raise NotImplementedError('subclasses of BaseEmailBackendTests must provide a get_mailbox_content() method') def flush_mailbox(self): raise NotImplementedError('subclasses of BaseEmailBackendTests may require a flush_mailbox() method') def get_the_message(self): mailbox = self.get_mailbox_content() self.assertEqual( len(mailbox), 1, "Expected exactly one message, got %d.\n%r" % (len(mailbox), [m.as_string() for m in mailbox]) ) return mailbox[0] def test_send(self): email = EmailMessage('Subject', 'Content', '[email protected]', ['[email protected]']) num_sent = mail.get_connection().send_messages([email]) self.assertEqual(num_sent, 1) message = self.get_the_message() self.assertEqual(message["subject"], "Subject") self.assertEqual(message.get_payload(), "Content") self.assertEqual(message["from"], "[email protected]") self.assertEqual(message.get_all("to"), ["[email protected]"]) def test_send_unicode(self): email = EmailMessage('Chère maman', 'Je t\'aime très fort', '[email protected]', ['[email protected]']) num_sent = mail.get_connection().send_messages([email]) 
self.assertEqual(num_sent, 1) message = self.get_the_message() self.assertEqual(message["subject"], '=?utf-8?q?Ch=C3=A8re_maman?=') self.assertEqual(message.get_payload(decode=True).decode(), 'Je t\'aime très fort') def test_send_long_lines(self): """ Email line length is limited to 998 chars by the RFC: https://tools.ietf.org/html/rfc5322#section-2.1.1 Message body containing longer lines are converted to Quoted-Printable to avoid having to insert newlines, which could be hairy to do properly. """ # Unencoded body length is < 998 (840) but > 998 when utf-8 encoded. email = EmailMessage('Subject', 'В южных морях ' * 60, '[email protected]', ['[email protected]']) email.send() message = self.get_the_message() self.assertMessageHasHeaders(message, { ('MIME-Version', '1.0'), ('Content-Type', 'text/plain; charset="utf-8"'), ('Content-Transfer-Encoding', 'quoted-printable'), }) def test_send_many(self): email1 = EmailMessage('Subject', 'Content1', '[email protected]', ['[email protected]']) email2 = EmailMessage('Subject', 'Content2', '[email protected]', ['[email protected]']) # send_messages() may take a list or an iterator. emails_lists = ([email1, email2], iter((email1, email2))) for emails_list in emails_lists: num_sent = mail.get_connection().send_messages(emails_list) self.assertEqual(num_sent, 2) messages = self.get_mailbox_content() self.assertEqual(len(messages), 2) self.assertEqual(messages[0].get_payload(), 'Content1') self.assertEqual(messages[1].get_payload(), 'Content2') self.flush_mailbox() def test_send_verbose_name(self): email = EmailMessage("Subject", "Content", '"Firstname Sürname" <[email protected]>', ["[email protected]"]) email.send() message = self.get_the_message() self.assertEqual(message["subject"], "Subject") self.assertEqual(message.get_payload(), "Content") self.assertEqual(message["from"], "=?utf-8?q?Firstname_S=C3=BCrname?= <[email protected]>") def test_plaintext_send_mail(self): """ Test send_mail without the html_message regression test for adding html_message parameter to send_mail() """ send_mail('Subject', 'Content', '[email protected]', ['[email protected]']) message = self.get_the_message() self.assertEqual(message.get('subject'), 'Subject') self.assertEqual(message.get_all('to'), ['[email protected]']) self.assertFalse(message.is_multipart()) self.assertEqual(message.get_payload(), 'Content') self.assertEqual(message.get_content_type(), 'text/plain') def test_html_send_mail(self): """Test html_message argument to send_mail""" send_mail('Subject', 'Content', '[email protected]', ['[email protected]'], html_message='HTML Content') message = self.get_the_message() self.assertEqual(message.get('subject'), 'Subject') self.assertEqual(message.get_all('to'), ['[email protected]']) self.assertTrue(message.is_multipart()) self.assertEqual(len(message.get_payload()), 2) self.assertEqual(message.get_payload(0).get_payload(), 'Content') self.assertEqual(message.get_payload(0).get_content_type(), 'text/plain') self.assertEqual(message.get_payload(1).get_payload(), 'HTML Content') self.assertEqual(message.get_payload(1).get_content_type(), 'text/html') @override_settings(MANAGERS=[('nobody', '[email protected]')]) def test_html_mail_managers(self): """Test html_message argument to mail_managers""" mail_managers('Subject', 'Content', html_message='HTML Content') message = self.get_the_message() self.assertEqual(message.get('subject'), '[Django] Subject') self.assertEqual(message.get_all('to'), ['[email protected]']) self.assertTrue(message.is_multipart()) 
self.assertEqual(len(message.get_payload()), 2) self.assertEqual(message.get_payload(0).get_payload(), 'Content') self.assertEqual(message.get_payload(0).get_content_type(), 'text/plain') self.assertEqual(message.get_payload(1).get_payload(), 'HTML Content') self.assertEqual(message.get_payload(1).get_content_type(), 'text/html') @override_settings(ADMINS=[('nobody', '[email protected]')]) def test_html_mail_admins(self): """Test html_message argument to mail_admins """ mail_admins('Subject', 'Content', html_message='HTML Content') message = self.get_the_message() self.assertEqual(message.get('subject'), '[Django] Subject') self.assertEqual(message.get_all('to'), ['[email protected]']) self.assertTrue(message.is_multipart()) self.assertEqual(len(message.get_payload()), 2) self.assertEqual(message.get_payload(0).get_payload(), 'Content') self.assertEqual(message.get_payload(0).get_content_type(), 'text/plain') self.assertEqual(message.get_payload(1).get_payload(), 'HTML Content') self.assertEqual(message.get_payload(1).get_content_type(), 'text/html') @override_settings( ADMINS=[('nobody', '[email protected]')], MANAGERS=[('nobody', '[email protected]')]) def test_manager_and_admin_mail_prefix(self): """ String prefix + lazy translated subject = bad output Regression for #13494 """ mail_managers(gettext_lazy('Subject'), 'Content') message = self.get_the_message() self.assertEqual(message.get('subject'), '[Django] Subject') self.flush_mailbox() mail_admins(gettext_lazy('Subject'), 'Content') message = self.get_the_message() self.assertEqual(message.get('subject'), '[Django] Subject') @override_settings(ADMINS=[], MANAGERS=[]) def test_empty_admins(self): """ mail_admins/mail_managers doesn't connect to the mail server if there are no recipients (#9383) """ mail_admins('hi', 'there') self.assertEqual(self.get_mailbox_content(), []) mail_managers('hi', 'there') self.assertEqual(self.get_mailbox_content(), []) def test_message_cc_header(self): """ Regression test for #7722 """ email = EmailMessage('Subject', 'Content', '[email protected]', ['[email protected]'], cc=['[email protected]']) mail.get_connection().send_messages([email]) message = self.get_the_message() self.assertMessageHasHeaders(message, { ('MIME-Version', '1.0'), ('Content-Type', 'text/plain; charset="utf-8"'), ('Content-Transfer-Encoding', '7bit'), ('Subject', 'Subject'), ('From', '[email protected]'), ('To', '[email protected]'), ('Cc', '[email protected]')}) self.assertIn('\nDate: ', message.as_string()) def test_idn_send(self): """ Regression test for #14301 """ self.assertTrue(send_mail('Subject', 'Content', 'from@öäü.com', ['to@öäü.com'])) message = self.get_the_message() self.assertEqual(message.get('subject'), 'Subject') self.assertEqual(message.get('from'), '[email protected]') self.assertEqual(message.get('to'), '[email protected]') self.flush_mailbox() m = EmailMessage('Subject', 'Content', 'from@öäü.com', ['to@öäü.com'], cc=['cc@öäü.com']) m.send() message = self.get_the_message() self.assertEqual(message.get('subject'), 'Subject') self.assertEqual(message.get('from'), '[email protected]') self.assertEqual(message.get('to'), '[email protected]') self.assertEqual(message.get('cc'), '[email protected]') def test_recipient_without_domain(self): """ Regression test for #15042 """ self.assertTrue(send_mail("Subject", "Content", "tester", ["django"])) message = self.get_the_message() self.assertEqual(message.get('subject'), 'Subject') self.assertEqual(message.get('from'), "tester") self.assertEqual(message.get('to'), 
"django") def test_lazy_addresses(self): """ Email sending should support lazy email addresses (#24416). """ _ = gettext_lazy self.assertTrue(send_mail('Subject', 'Content', _('tester'), [_('django')])) message = self.get_the_message() self.assertEqual(message.get('from'), 'tester') self.assertEqual(message.get('to'), 'django') self.flush_mailbox() m = EmailMessage( 'Subject', 'Content', _('tester'), [_('to1'), _('to2')], cc=[_('cc1'), _('cc2')], bcc=[_('bcc')], reply_to=[_('reply')], ) self.assertEqual(m.recipients(), ['to1', 'to2', 'cc1', 'cc2', 'bcc']) m.send() message = self.get_the_message() self.assertEqual(message.get('from'), 'tester') self.assertEqual(message.get('to'), 'to1, to2') self.assertEqual(message.get('cc'), 'cc1, cc2') self.assertEqual(message.get('Reply-To'), 'reply') def test_close_connection(self): """ Connection can be closed (even when not explicitly opened) """ conn = mail.get_connection(username='', password='') conn.close() def test_use_as_contextmanager(self): """ The connection can be used as a contextmanager. """ opened = [False] closed = [False] conn = mail.get_connection(username='', password='') def open(): opened[0] = True conn.open = open def close(): closed[0] = True conn.close = close with conn as same_conn: self.assertTrue(opened[0]) self.assertIs(same_conn, conn) self.assertFalse(closed[0]) self.assertTrue(closed[0]) class LocmemBackendTests(BaseEmailBackendTests, SimpleTestCase): email_backend = 'django.core.mail.backends.locmem.EmailBackend' def get_mailbox_content(self): return [m.message() for m in mail.outbox] def flush_mailbox(self): mail.outbox = [] def tearDown(self): super().tearDown() mail.outbox = [] def test_locmem_shared_messages(self): """ Make sure that the locmen backend populates the outbox. """ connection = locmem.EmailBackend() connection2 = locmem.EmailBackend() email = EmailMessage( 'Subject', 'Content', '[email protected]', ['[email protected]'], headers={'From': '[email protected]'}, ) connection.send_messages([email]) connection2.send_messages([email]) self.assertEqual(len(mail.outbox), 2) def test_validate_multiline_headers(self): # Ticket #18861 - Validate emails when using the locmem backend with self.assertRaises(BadHeaderError): send_mail('Subject\nMultiline', 'Content', '[email protected]', ['[email protected]']) class FileBackendTests(BaseEmailBackendTests, SimpleTestCase): email_backend = 'django.core.mail.backends.filebased.EmailBackend' def setUp(self): super().setUp() self.tmp_dir = tempfile.mkdtemp() self.addCleanup(shutil.rmtree, self.tmp_dir) self._settings_override = override_settings(EMAIL_FILE_PATH=self.tmp_dir) self._settings_override.enable() def tearDown(self): self._settings_override.disable() super().tearDown() def flush_mailbox(self): for filename in os.listdir(self.tmp_dir): os.unlink(os.path.join(self.tmp_dir, filename)) def get_mailbox_content(self): messages = [] for filename in os.listdir(self.tmp_dir): with open(os.path.join(self.tmp_dir, filename), 'rb') as fp: session = fp.read().split(b'\n' + (b'-' * 79) + b'\n') messages.extend(message_from_bytes(m) for m in session if m) return messages def test_file_sessions(self): """Make sure opening a connection creates a new file""" msg = EmailMessage( 'Subject', 'Content', '[email protected]', ['[email protected]'], headers={'From': '[email protected]'}, ) connection = mail.get_connection() connection.send_messages([msg]) self.assertEqual(len(os.listdir(self.tmp_dir)), 1) with open(os.path.join(self.tmp_dir, os.listdir(self.tmp_dir)[0]), 'rb') as fp: 
message = message_from_binary_file(fp) self.assertEqual(message.get_content_type(), 'text/plain') self.assertEqual(message.get('subject'), 'Subject') self.assertEqual(message.get('from'), '[email protected]') self.assertEqual(message.get('to'), '[email protected]') connection2 = mail.get_connection() connection2.send_messages([msg]) self.assertEqual(len(os.listdir(self.tmp_dir)), 2) connection.send_messages([msg]) self.assertEqual(len(os.listdir(self.tmp_dir)), 2) msg.connection = mail.get_connection() self.assertTrue(connection.open()) msg.send() self.assertEqual(len(os.listdir(self.tmp_dir)), 3) msg.send() self.assertEqual(len(os.listdir(self.tmp_dir)), 3) connection.close() class ConsoleBackendTests(BaseEmailBackendTests, SimpleTestCase): email_backend = 'django.core.mail.backends.console.EmailBackend' def setUp(self): super().setUp() self.__stdout = sys.stdout self.stream = sys.stdout = StringIO() def tearDown(self): del self.stream sys.stdout = self.__stdout del self.__stdout super().tearDown() def flush_mailbox(self): self.stream = sys.stdout = StringIO() def get_mailbox_content(self): messages = self.stream.getvalue().split('\n' + ('-' * 79) + '\n') return [message_from_bytes(m.encode()) for m in messages if m] def test_console_stream_kwarg(self): """ The console backend can be pointed at an arbitrary stream. """ s = StringIO() connection = mail.get_connection('django.core.mail.backends.console.EmailBackend', stream=s) send_mail('Subject', 'Content', '[email protected]', ['[email protected]'], connection=connection) message = s.getvalue().split('\n' + ('-' * 79) + '\n')[0].encode() self.assertMessageHasHeaders(message, { ('MIME-Version', '1.0'), ('Content-Type', 'text/plain; charset="utf-8"'), ('Content-Transfer-Encoding', '7bit'), ('Subject', 'Subject'), ('From', '[email protected]'), ('To', '[email protected]')}) self.assertIn(b'\nDate: ', message) class FakeSMTPChannel(smtpd.SMTPChannel): def collect_incoming_data(self, data): try: smtpd.SMTPChannel.collect_incoming_data(self, data) except UnicodeDecodeError: # Ignore decode error in SSL/TLS connection tests as the test only # cares whether the connection attempt was made. pass def smtp_AUTH(self, arg): if arg == 'CRAM-MD5': # This is only the first part of the login process. But it's enough # for our tests. challenge = base64.b64encode(b'somerandomstring13579') self.push('334 %s' % challenge.decode()) else: self.push('502 Error: login "%s" not implemented' % arg) class FakeSMTPServer(smtpd.SMTPServer, threading.Thread): """ Asyncore SMTP server wrapped into a thread. Based on DummyFTPServer from: http://svn.python.org/view/python/branches/py3k/Lib/test/test_ftplib.py?revision=86061&view=markup """ channel_class = FakeSMTPChannel def __init__(self, *args, **kwargs): threading.Thread.__init__(self) smtpd.SMTPServer.__init__(self, *args, decode_data=True, **kwargs) self._sink = [] self.active = False self.active_lock = threading.Lock() self.sink_lock = threading.Lock() def process_message(self, peer, mailfrom, rcpttos, data): data = data.encode() m = message_from_bytes(data) maddr = parseaddr(m.get('from'))[1] if mailfrom != maddr: # According to the spec, mailfrom does not necessarily match the # From header - this is the case where the local part isn't # encoded, so try to correct that. 
lp, domain = mailfrom.split('@', 1) lp = Header(lp, 'utf-8').encode() mailfrom = '@'.join([lp, domain]) if mailfrom != maddr: return "553 '%s' != '%s'" % (mailfrom, maddr) with self.sink_lock: self._sink.append(m) def get_sink(self): with self.sink_lock: return self._sink[:] def flush_sink(self): with self.sink_lock: self._sink[:] = [] def start(self): assert not self.active self.__flag = threading.Event() threading.Thread.start(self) self.__flag.wait() def run(self): self.active = True self.__flag.set() while self.active and asyncore.socket_map: with self.active_lock: asyncore.loop(timeout=0.1, count=1) asyncore.close_all() def stop(self): if self.active: self.active = False self.join() class FakeAUTHSMTPConnection(SMTP): """ An SMTP connection that pretends to support the AUTH command. It does not, but it allows testing the first part of the AUTH process. """ def ehlo(self, name=''): response = SMTP.ehlo(self, name=name) self.esmtp_features.update({ 'auth': 'CRAM-MD5 PLAIN LOGIN', }) return response class SMTPBackendTestsBase(SimpleTestCase): @classmethod def setUpClass(cls): super().setUpClass() cls.server = FakeSMTPServer(('127.0.0.1', 0), None) cls._settings_override = override_settings( EMAIL_HOST="127.0.0.1", EMAIL_PORT=cls.server.socket.getsockname()[1]) cls._settings_override.enable() cls.server.start() @classmethod def tearDownClass(cls): cls._settings_override.disable() cls.server.stop() super().tearDownClass() class SMTPBackendTests(BaseEmailBackendTests, SMTPBackendTestsBase): email_backend = 'django.core.mail.backends.smtp.EmailBackend' def setUp(self): super().setUp() self.server.flush_sink() def tearDown(self): self.server.flush_sink() super().tearDown() def flush_mailbox(self): self.server.flush_sink() def get_mailbox_content(self): return self.server.get_sink() @override_settings( EMAIL_HOST_USER="not empty username", EMAIL_HOST_PASSWORD='not empty password', ) def test_email_authentication_use_settings(self): backend = smtp.EmailBackend() self.assertEqual(backend.username, 'not empty username') self.assertEqual(backend.password, 'not empty password') @override_settings( EMAIL_HOST_USER="not empty username", EMAIL_HOST_PASSWORD='not empty password', ) def test_email_authentication_override_settings(self): backend = smtp.EmailBackend(username='username', password='password') self.assertEqual(backend.username, 'username') self.assertEqual(backend.password, 'password') @override_settings( EMAIL_HOST_USER="not empty username", EMAIL_HOST_PASSWORD='not empty password', ) def test_email_disabled_authentication(self): backend = smtp.EmailBackend(username='', password='') self.assertEqual(backend.username, '') self.assertEqual(backend.password, '') def test_auth_attempted(self): """ Opening the backend with a non-empty username/password tries to authenticate against the SMTP server. """ backend = smtp.EmailBackend( username='not empty username', password='not empty password') with self.assertRaisesMessage(SMTPException, 'SMTP AUTH extension not supported by server.'): with backend: pass def test_server_open(self): """ open() returns whether it opened a connection. """ backend = smtp.EmailBackend(username='', password='') self.assertIsNone(backend.connection) opened = backend.open() backend.close() self.assertIs(opened, True) def test_reopen_connection(self): backend = smtp.EmailBackend() # Simulate an already open connection.
backend.connection = True self.assertIs(backend.open(), False) def test_server_login(self): """ Even if the Python SMTP server doesn't support authentication, the login process starts and the appropriate exception is raised. """ class CustomEmailBackend(smtp.EmailBackend): connection_class = FakeAUTHSMTPConnection backend = CustomEmailBackend(username='username', password='password') with self.assertRaises(SMTPAuthenticationError): with backend: pass @override_settings(EMAIL_USE_TLS=True) def test_email_tls_use_settings(self): backend = smtp.EmailBackend() self.assertTrue(backend.use_tls) @override_settings(EMAIL_USE_TLS=True) def test_email_tls_override_settings(self): backend = smtp.EmailBackend(use_tls=False) self.assertFalse(backend.use_tls) def test_email_tls_default_disabled(self): backend = smtp.EmailBackend() self.assertFalse(backend.use_tls) def test_ssl_tls_mutually_exclusive(self): msg = ( 'EMAIL_USE_TLS/EMAIL_USE_SSL are mutually exclusive, so only set ' 'one of those settings to True.' ) with self.assertRaisesMessage(ValueError, msg): smtp.EmailBackend(use_ssl=True, use_tls=True) @override_settings(EMAIL_USE_SSL=True) def test_email_ssl_use_settings(self): backend = smtp.EmailBackend() self.assertTrue(backend.use_ssl) @override_settings(EMAIL_USE_SSL=True) def test_email_ssl_override_settings(self): backend = smtp.EmailBackend(use_ssl=False) self.assertFalse(backend.use_ssl) def test_email_ssl_default_disabled(self): backend = smtp.EmailBackend() self.assertFalse(backend.use_ssl) @override_settings(EMAIL_SSL_CERTFILE='foo') def test_email_ssl_certfile_use_settings(self): backend = smtp.EmailBackend() self.assertEqual(backend.ssl_certfile, 'foo') @override_settings(EMAIL_SSL_CERTFILE='foo') def test_email_ssl_certfile_override_settings(self): backend = smtp.EmailBackend(ssl_certfile='bar') self.assertEqual(backend.ssl_certfile, 'bar') def test_email_ssl_certfile_default_disabled(self): backend = smtp.EmailBackend() self.assertIsNone(backend.ssl_certfile) @override_settings(EMAIL_SSL_KEYFILE='foo') def test_email_ssl_keyfile_use_settings(self): backend = smtp.EmailBackend() self.assertEqual(backend.ssl_keyfile, 'foo') @override_settings(EMAIL_SSL_KEYFILE='foo') def test_email_ssl_keyfile_override_settings(self): backend = smtp.EmailBackend(ssl_keyfile='bar') self.assertEqual(backend.ssl_keyfile, 'bar') def test_email_ssl_keyfile_default_disabled(self): backend = smtp.EmailBackend() self.assertIsNone(backend.ssl_keyfile) @override_settings(EMAIL_USE_TLS=True) def test_email_tls_attempts_starttls(self): backend = smtp.EmailBackend() self.assertTrue(backend.use_tls) with self.assertRaisesMessage(SMTPException, 'STARTTLS extension not supported by server.'): with backend: pass @override_settings(EMAIL_USE_SSL=True) def test_email_ssl_attempts_ssl_connection(self): backend = smtp.EmailBackend() self.assertTrue(backend.use_ssl) with self.assertRaises(SSLError): with backend: pass def test_connection_timeout_default(self): """The connection's timeout value is None by default.""" connection = mail.get_connection('django.core.mail.backends.smtp.EmailBackend') self.assertIsNone(connection.timeout) def test_connection_timeout_custom(self): """The timeout parameter can be customized.""" class MyEmailBackend(smtp.EmailBackend): def __init__(self, *args, **kwargs): kwargs.setdefault('timeout', 42) super().__init__(*args, **kwargs) myemailbackend = MyEmailBackend() myemailbackend.open() self.assertEqual(myemailbackend.timeout, 42) self.assertEqual(myemailbackend.connection.timeout, 42) 
myemailbackend.close() @override_settings(EMAIL_TIMEOUT=10) def test_email_timeout_override_settings(self): backend = smtp.EmailBackend() self.assertEqual(backend.timeout, 10) def test_email_msg_uses_crlf(self): """#23063 -- RFC-compliant messages are sent over SMTP.""" send = SMTP.send try: smtp_messages = [] def mock_send(self, s): smtp_messages.append(s) return send(self, s) SMTP.send = mock_send email = EmailMessage('Subject', 'Content', '[email protected]', ['[email protected]']) mail.get_connection().send_messages([email]) # Find the actual message msg = None for i, m in enumerate(smtp_messages): if m[:4] == 'data': msg = smtp_messages[i + 1] break self.assertTrue(msg) msg = msg.decode() # The message only contains CRLF and not combinations of CRLF, LF, and CR. msg = msg.replace('\r\n', '') self.assertNotIn('\r', msg) self.assertNotIn('\n', msg) finally: SMTP.send = send def test_send_messages_after_open_failed(self): """ send_messages() shouldn't try to send messages if open() raises an exception after initializing the connection. """ backend = smtp.EmailBackend() # Simulate connection initialization success and a subsequent # connection exception. backend.connection = True backend.open = lambda: None email = EmailMessage('Subject', 'Content', '[email protected]', ['[email protected]']) self.assertEqual(backend.send_messages([email]), 0) def test_send_messages_empty_list(self): backend = smtp.EmailBackend() backend.connection = True self.assertEqual(backend.send_messages([]), 0) def test_send_messages_zero_sent(self): """A message isn't sent if it doesn't have any recipients.""" backend = smtp.EmailBackend() backend.connection = True email = EmailMessage('Subject', 'Content', '[email protected]', to=[]) sent = backend.send_messages([email]) self.assertEqual(sent, 0) class SMTPBackendStoppedServerTests(SMTPBackendTestsBase): """ These tests require a separate class, because the FakeSMTPServer is shut down in setUpClass(), and it cannot be restarted ("RuntimeError: threads can only be started once"). """ @classmethod def setUpClass(cls): super().setUpClass() cls.backend = smtp.EmailBackend(username='', password='') cls.server.stop() def test_server_stopped(self): """ Closing the backend while the SMTP server is stopped doesn't raise an exception. """ self.backend.close() def test_fail_silently_on_connection_error(self): """ A socket connection error is silenced with fail_silently=True. """ with self.assertRaises(ConnectionError): self.backend.open() self.backend.fail_silently = True self.backend.open()
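# Illustrative sketch (not part of the test suite above): the backends these
# tests exercise are selected through mail.get_connection(), either from the
# EMAIL_BACKEND setting or from an explicit dotted path. A minimal standalone
# demonstration with the locmem backend follows; settings.configure() and the
# addresses used are assumptions for running outside a project.
if __name__ == '__main__':
    from django.conf import settings
    settings.configure(EMAIL_BACKEND='django.core.mail.backends.locmem.EmailBackend')

    from django.core import mail
    from django.core.mail import EmailMessage

    connection = mail.get_connection()  # resolves EMAIL_BACKEND
    EmailMessage(
        'Subject', 'Content', 'from@example.com', ['to@example.com'],
        connection=connection,
    ).send()
    # The locmem backend appends a copy of each sent message to mail.outbox.
    assert len(mail.outbox) == 1 and mail.outbox[0].subject == 'Subject'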
from django.template.defaultfilters import filesizeformat from django.test import SimpleTestCase from django.utils import translation class FunctionTests(SimpleTestCase): def test_formats(self): tests = [ (1023, '1023\xa0bytes'), (1024, '1.0\xa0KB'), (10 * 1024, '10.0\xa0KB'), (1024 * 1024 - 1, '1024.0\xa0KB'), (1024 * 1024, '1.0\xa0MB'), (1024 * 1024 * 50, '50.0\xa0MB'), (1024 * 1024 * 1024 - 1, '1024.0\xa0MB'), (1024 * 1024 * 1024, '1.0\xa0GB'), (1024 * 1024 * 1024 * 1024, '1.0\xa0TB'), (1024 * 1024 * 1024 * 1024 * 1024, '1.0\xa0PB'), (1024 * 1024 * 1024 * 1024 * 1024 * 2000, '2000.0\xa0PB'), (complex(1, -1), '0\xa0bytes'), ('', '0\xa0bytes'), ('\N{GREEK SMALL LETTER ALPHA}', '0\xa0bytes'), ] for value, expected in tests: with self.subTest(value=value): self.assertEqual(filesizeformat(value), expected) def test_localized_formats(self): tests = [ (1023, '1023\xa0Bytes'), (1024, '1,0\xa0KB'), (10 * 1024, '10,0\xa0KB'), (1024 * 1024 - 1, '1024,0\xa0KB'), (1024 * 1024, '1,0\xa0MB'), (1024 * 1024 * 50, '50,0\xa0MB'), (1024 * 1024 * 1024 - 1, '1024,0\xa0MB'), (1024 * 1024 * 1024, '1,0\xa0GB'), (1024 * 1024 * 1024 * 1024, '1,0\xa0TB'), (1024 * 1024 * 1024 * 1024 * 1024, '1,0\xa0PB'), (1024 * 1024 * 1024 * 1024 * 1024 * 2000, '2000,0\xa0PB'), (complex(1, -1), '0\xa0Bytes'), ('', '0\xa0Bytes'), ('\N{GREEK SMALL LETTER ALPHA}', '0\xa0Bytes'), ] with self.settings(USE_L10N=True), translation.override('de'): for value, expected in tests: with self.subTest(value=value): self.assertEqual(filesizeformat(value), expected) def test_negative_numbers(self): tests = [ (-100, '-100\xa0bytes'), (-1024 * 1024 * 50, '-50.0\xa0MB'), ] for value, expected in tests: with self.subTest(value=value): self.assertEqual(filesizeformat(value), expected)
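# Illustrative sketch (not part of the test suite above): the same filter used
# standalone. settings.configure() with defaults (localization off) yields the
# unlocalized formatting checked in test_formats(); the values here are
# assumptions for the example.
if __name__ == '__main__':
    from django.conf import settings
    settings.configure()

    from django.template.defaultfilters import filesizeformat

    assert filesizeformat(1024) == '1.0\xa0KB'        # non-breaking space separator
    assert filesizeformat(1024 ** 3) == '1.0\xa0GB'
    assert filesizeformat('garbage') == '0\xa0bytes'  # invalid input degrades to 0 bytes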
import functools import itertools import logging import os import pathlib import signal import subprocess import sys import threading import time import traceback import weakref from collections import defaultdict from pathlib import Path from types import ModuleType from zipimport import zipimporter from django.apps import apps from django.core.signals import request_finished from django.dispatch import Signal from django.utils.functional import cached_property from django.utils.version import get_version_tuple autoreload_started = Signal() file_changed = Signal(providing_args=['file_path', 'kind']) DJANGO_AUTORELOAD_ENV = 'RUN_MAIN' logger = logging.getLogger('django.utils.autoreload') # If an error is raised while importing a file, it's not placed in sys.modules. # This means that any future modifications aren't caught. Keep a list of these # file paths to allow watching them in the future. _error_files = [] _exception = None try: import termios except ImportError: termios = None try: import pywatchman except ImportError: pywatchman = None def check_errors(fn): @functools.wraps(fn) def wrapper(*args, **kwargs): global _exception try: fn(*args, **kwargs) except Exception: _exception = sys.exc_info() et, ev, tb = _exception if getattr(ev, 'filename', None) is None: # get the filename from the last item in the stack filename = traceback.extract_tb(tb)[-1][0] else: filename = ev.filename if filename not in _error_files: _error_files.append(filename) raise return wrapper def raise_last_exception(): global _exception if _exception is not None: raise _exception[1] def ensure_echo_on(): """ Ensure that echo mode is enabled. Some tools such as PDB disable it which causes usability issues after reload. """ if not termios or not sys.stdin.isatty(): return attr_list = termios.tcgetattr(sys.stdin) if not attr_list[3] & termios.ECHO: attr_list[3] |= termios.ECHO if hasattr(signal, 'SIGTTOU'): old_handler = signal.signal(signal.SIGTTOU, signal.SIG_IGN) else: old_handler = None termios.tcsetattr(sys.stdin, termios.TCSANOW, attr_list) if old_handler is not None: signal.signal(signal.SIGTTOU, old_handler) def iter_all_python_module_files(): # This is a hot path during reloading. Create a stable sorted list of # modules based on the module name and pass it to iter_modules_and_files(). # This ensures cached results are returned in the usual case that modules # aren't loaded on the fly. keys = sorted(sys.modules) modules = tuple(m for m in map(sys.modules.__getitem__, keys) if not isinstance(m, weakref.ProxyTypes)) return iter_modules_and_files(modules, frozenset(_error_files)) @functools.lru_cache(maxsize=1) def iter_modules_and_files(modules, extra_files): """Iterate through all modules needed to be watched.""" sys_file_paths = [] for module in modules: # During debugging (with PyDev) the 'typing.io' and 'typing.re' objects # are added to sys.modules, however they are types not modules and so # cause issues here. if not isinstance(module, ModuleType): continue if module.__name__ == '__main__': # __main__ (usually manage.py) doesn't always have a __spec__ set. # Handle this by falling back to using __file__, resolved below. # See https://docs.python.org/reference/import.html#main-spec sys_file_paths.append(module.__file__) continue if getattr(module, '__spec__', None) is None: continue spec = module.__spec__ # Modules could be loaded from places without a concrete location. If # this is the case, skip them. 
if spec.has_location: origin = spec.loader.archive if isinstance(spec.loader, zipimporter) else spec.origin sys_file_paths.append(origin) results = set() for filename in itertools.chain(sys_file_paths, extra_files): if not filename: continue path = pathlib.Path(filename) try: resolved_path = path.resolve(strict=True).absolute() except FileNotFoundError: # The module could have been removed, don't fail loudly if this # is the case. continue results.add(resolved_path) return frozenset(results) @functools.lru_cache(maxsize=1) def common_roots(paths): """ Return a tuple of common roots that are shared between the given paths. File system watchers operate on directories and aren't cheap to create. Try to find the minimum set of directories to watch that encompass all of the files that need to be watched. """ # Inspired by Werkzeug: # https://github.com/pallets/werkzeug/blob/7477be2853df70a022d9613e765581b9411c3c39/werkzeug/_reloader.py # Create a sorted list of the path components, longest first. path_parts = sorted([x.parts for x in paths], key=len, reverse=True) tree = {} for chunks in path_parts: node = tree # Add each part of the path to the tree. for chunk in chunks: node = node.setdefault(chunk, {}) # Clear the last leaf in the tree. node.clear() # Turn the tree into a list of Path instances. def _walk(node, path): for prefix, child in node.items(): yield from _walk(child, path + (prefix,)) if not node: yield Path(*path) return tuple(_walk(tree, ())) def sys_path_directories(): """ Yield absolute directories from sys.path, ignoring entries that don't exist. """ for path in sys.path: path = Path(path) try: resolved_path = path.resolve(strict=True).absolute() except FileNotFoundError: continue # If the path is a file (like a zip file), watch the parent directory. if resolved_path.is_file(): yield resolved_path.parent else: yield resolved_path def get_child_arguments(): """ Return the executable and arguments for re-running the current program. Preserve the `python -m django` form of invocation, since reloading with plain sys.argv would otherwise break it. """ import django.__main__ args = [sys.executable] + ['-W%s' % o for o in sys.warnoptions] if sys.argv[0] == django.__main__.__file__: # The server was started with `python -m django runserver`. args += ['-m', 'django'] args += sys.argv[1:] else: args += sys.argv return args def trigger_reload(filename): logger.info('%s changed, reloading.', filename) sys.exit(3) def restart_with_reloader(): new_environ = {**os.environ, DJANGO_AUTORELOAD_ENV: 'true'} args = get_child_arguments() while True: exit_code = subprocess.call(args, env=new_environ, close_fds=False) if exit_code != 3: return exit_code class BaseReloader: def __init__(self): self.extra_files = set() self.directory_globs = defaultdict(set) self._stop_condition = threading.Event() def watch_dir(self, path, glob): path = Path(path) if not path.is_absolute(): raise ValueError('%s must be absolute.' % path) logger.debug('Watching dir %s with glob %s.', path, glob) self.directory_globs[path].add(glob) def watch_file(self, path): path = Path(path) if not path.is_absolute(): raise ValueError('%s must be absolute.' % path) logger.debug('Watching file %s.', path) self.extra_files.add(path) def watched_files(self, include_globs=True): """ Yield all files that need to be watched, including module files and files within globs.
""" yield from iter_all_python_module_files() yield from self.extra_files if include_globs: for directory, patterns in self.directory_globs.items(): for pattern in patterns: yield from directory.glob(pattern) def wait_for_apps_ready(self, app_reg, django_main_thread): """ Wait until Django reports that the apps have been loaded. If the given thread has terminated before the apps are ready, then a SyntaxError or other non-recoverable error has been raised. In that case, stop waiting for the apps_ready event and continue processing. Return True if the thread is alive and the ready event has been triggered, or False if the thread is terminated while waiting for the event. """ while django_main_thread.is_alive(): if app_reg.ready_event.wait(timeout=0.1): return True else: logger.debug('Main Django thread has terminated before apps are ready.') return False def run(self, django_main_thread): logger.debug('Waiting for apps ready_event.') self.wait_for_apps_ready(apps, django_main_thread) from django.urls import get_resolver # Prevent a race condition where URL modules aren't loaded when the # reloader starts by accessing the urlconf_module property. try: get_resolver().urlconf_module except Exception: # Loading the urlconf can result in errors during development. # If this occurs then swallow the error and continue. pass logger.debug('Apps ready_event triggered. Sending autoreload_started signal.') autoreload_started.send(sender=self) self.run_loop() def run_loop(self): ticker = self.tick() while not self.should_stop: try: next(ticker) except StopIteration: break self.stop() def tick(self): """ This generator is called in a loop from run_loop. It's important that the method takes care of pausing or otherwise waiting for a period of time. This split between run_loop() and tick() is to improve the testability of the reloader implementations by decoupling the work they do from the loop. """ raise NotImplementedError('subclasses must implement tick().') @classmethod def check_availability(cls): raise NotImplementedError('subclasses must implement check_availability().') def notify_file_changed(self, path): results = file_changed.send(sender=self, file_path=path) logger.debug('%s notified as changed. Signal results: %s.', path, results) if not any(res[1] for res in results): trigger_reload(path) # These are primarily used for testing. @property def should_stop(self): return self._stop_condition.is_set() def stop(self): self._stop_condition.set() class StatReloader(BaseReloader): SLEEP_TIME = 1 # Check for changes once per second. def tick(self): mtimes = {} while True: for filepath, mtime in self.snapshot_files(): old_time = mtimes.get(filepath) mtimes[filepath] = mtime if old_time is None: logger.debug('File %s first seen with mtime %s', filepath, mtime) continue elif mtime > old_time: logger.debug('File %s previous mtime: %s, current mtime: %s', filepath, old_time, mtime) self.notify_file_changed(filepath) time.sleep(self.SLEEP_TIME) yield def snapshot_files(self): # watched_files may produce duplicate paths if globs overlap. seen_files = set() for file in self.watched_files(): if file in seen_files: continue try: mtime = file.stat().st_mtime except OSError: # This is thrown when the file does not exist. 
continue seen_files.add(file) yield file, mtime @classmethod def check_availability(cls): return True class WatchmanUnavailable(RuntimeError): pass class WatchmanReloader(BaseReloader): def __init__(self): self.roots = defaultdict(set) self.processed_request = threading.Event() self.client_timeout = int(os.environ.get('DJANGO_WATCHMAN_TIMEOUT', 5)) super().__init__() @cached_property def client(self): return pywatchman.client(timeout=self.client_timeout) def _watch_root(self, root): # In practice this shouldn't occur, however, it's possible that a # directory that doesn't exist yet is being watched. If it's outside of # sys.path then this will end up as a new root. How to handle this isn't # clear: not adding the root will likely break when subscribing to the # changes, however, as this is currently an internal API, no files # outside of sys.path will be watched. Fixing this by checking inside # watch_glob() and watch_dir() is expensive; instead, this could fall # back to the StatReloader if this case is detected. For now, watching # its parent, if possible, is sufficient. if not root.exists(): if not root.parent.exists(): logger.warning('Unable to watch root dir %s as neither it nor its parent exists.', root) return root = root.parent result = self.client.query('watch-project', str(root.absolute())) if 'warning' in result: logger.warning('Watchman warning: %s', result['warning']) logger.debug('Watchman watch-project result: %s', result) return result['watch'], result.get('relative_path') @functools.lru_cache() def _get_clock(self, root): return self.client.query('clock', root)['clock'] def _subscribe(self, directory, name, expression): root, rel_path = self._watch_root(directory) query = { 'expression': expression, 'fields': ['name'], 'since': self._get_clock(root), 'dedup_results': True, } if rel_path: query['relative_root'] = rel_path logger.debug('Issuing watchman subscription %s, for root %s. Query: %s', name, root, query) self.client.query('subscribe', root, name, query) def _subscribe_dir(self, directory, filenames): if not directory.exists(): if not directory.parent.exists(): logger.warning('Unable to watch directory %s as neither it nor its parent exists.', directory) return prefix = 'files-parent-%s' % directory.name filenames = ['%s/%s' % (directory.name, filename) for filename in filenames] directory = directory.parent expression = ['name', filenames, 'wholename'] else: prefix = 'files' expression = ['name', filenames] self._subscribe(directory, '%s:%s' % (prefix, directory), expression) def _watch_glob(self, directory, patterns): """ Watch a directory with a specific glob. If the directory doesn't yet exist, attempt to watch the parent directory and amend the patterns to include this. It's important this method isn't called more than once per directory when updating all subscriptions. Subsequent calls will overwrite the named subscription, so it must include all possible glob expressions.
""" prefix = 'glob' if not directory.exists(): if not directory.parent.exists(): logger.warning('Unable to watch directory %s as neither it or its parent exist.', directory) return prefix = 'glob-parent-%s' % directory.name patterns = ['%s/%s' % (directory.name, pattern) for pattern in patterns] directory = directory.parent expression = ['anyof'] for pattern in patterns: expression.append(['match', pattern, 'wholename']) self._subscribe(directory, '%s:%s' % (prefix, directory), expression) def watched_roots(self, watched_files): extra_directories = self.directory_globs.keys() watched_file_dirs = [f.parent for f in watched_files] sys_paths = list(sys_path_directories()) return frozenset((*extra_directories, *watched_file_dirs, *sys_paths)) def _update_watches(self): watched_files = list(self.watched_files(include_globs=False)) found_roots = common_roots(self.watched_roots(watched_files)) logger.debug('Watching %s files', len(watched_files)) logger.debug('Found common roots: %s', found_roots) # Setup initial roots for performance, shortest roots first. for root in sorted(found_roots): self._watch_root(root) for directory, patterns in self.directory_globs.items(): self._watch_glob(directory, patterns) # Group sorted watched_files by their parent directory. sorted_files = sorted(watched_files, key=lambda p: p.parent) for directory, group in itertools.groupby(sorted_files, key=lambda p: p.parent): # These paths need to be relative to the parent directory. self._subscribe_dir(directory, [str(p.relative_to(directory)) for p in group]) def update_watches(self): try: self._update_watches() except Exception as ex: # If the service is still available, raise the original exception. if self.check_server_status(ex): raise def _check_subscription(self, sub): subscription = self.client.getSubscription(sub) if not subscription: return logger.debug('Watchman subscription %s has results.', sub) for result in subscription: # When using watch-project, it's not simple to get the relative # directory without storing some specific state. Store the full # path to the directory in the subscription name, prefixed by its # type (glob, files). root_directory = Path(result['subscription'].split(':', 1)[1]) logger.debug('Found root directory %s', root_directory) for file in result.get('files', []): self.notify_file_changed(root_directory / file) def request_processed(self, **kwargs): logger.debug('Request processed. Setting update_watches event.') self.processed_request.set() def tick(self): request_finished.connect(self.request_processed) self.update_watches() while True: if self.processed_request.is_set(): self.update_watches() self.processed_request.clear() try: self.client.receive() except pywatchman.SocketTimeout: pass except pywatchman.WatchmanError as ex: logger.debug('Watchman error: %s, checking server status.', ex) self.check_server_status(ex) else: for sub in list(self.client.subs.keys()): self._check_subscription(sub) yield def stop(self): self.client.close() super().stop() def check_server_status(self, inner_ex=None): """Return True if the server is available.""" try: self.client.query('version') except Exception: raise WatchmanUnavailable(str(inner_ex)) from inner_ex return True @classmethod def check_availability(cls): if not pywatchman: raise WatchmanUnavailable('pywatchman not installed.') client = pywatchman.client(timeout=0.1) try: result = client.capabilityCheck() except Exception: # The service is down? 
raise WatchmanUnavailable('Cannot connect to the watchman service.') version = get_version_tuple(result['version']) # Watchman 4.9 includes multiple improvements to watching project # directories as well as case insensitive filesystems. logger.debug('Watchman version %s', version) if version < (4, 9): raise WatchmanUnavailable('Watchman 4.9 or later is required.') def get_reloader(): """Return the most suitable reloader for this environment.""" try: WatchmanReloader.check_availability() except WatchmanUnavailable: return StatReloader() return WatchmanReloader() def start_django(reloader, main_func, *args, **kwargs): ensure_echo_on() main_func = check_errors(main_func) django_main_thread = threading.Thread(target=main_func, args=args, kwargs=kwargs, name='django-main-thread') django_main_thread.setDaemon(True) django_main_thread.start() while not reloader.should_stop: try: reloader.run(django_main_thread) except WatchmanUnavailable as ex: # It's possible that the watchman service shuts down or otherwise # becomes unavailable. In that case, use the StatReloader. reloader = StatReloader() logger.error('Error connecting to Watchman: %s', ex) logger.info('Watching for file changes with %s', reloader.__class__.__name__) def run_with_reloader(main_func, *args, **kwargs): signal.signal(signal.SIGTERM, lambda *args: sys.exit(0)) try: if os.environ.get(DJANGO_AUTORELOAD_ENV) == 'true': reloader = get_reloader() logger.info('Watching for file changes with %s', reloader.__class__.__name__) start_django(reloader, main_func, *args, **kwargs) else: exit_code = restart_with_reloader() sys.exit(exit_code) except KeyboardInterrupt: pass
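# Illustrative sketch (not part of this module): common_roots() reduces the
# watched paths to the smallest set of ancestor directories, which is what
# keeps the number of filesystem watches low. The paths below are assumptions
# for the example; no Django settings are required.
if __name__ == '__main__':
    roots = common_roots(frozenset({
        Path('/srv/app'),
        Path('/srv/app/pkg'),  # covered by /srv/app, so it is collapsed away
        Path('/srv/lib'),
    }))
    assert sorted(roots) == [Path('/srv/app'), Path('/srv/lib')]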
""" This module converts requested URLs to callback view functions. URLResolver is the main class here. Its resolve() method takes a URL (as a string) and returns a ResolverMatch object which provides access to all attributes of the resolved URL match. """ import functools import inspect import re from importlib import import_module from urllib.parse import quote from asgiref.local import Local from django.conf import settings from django.core.checks import Error, Warning from django.core.checks.urls import check_resolver from django.core.exceptions import ImproperlyConfigured, ViewDoesNotExist from django.utils.datastructures import MultiValueDict from django.utils.functional import cached_property from django.utils.http import RFC3986_SUBDELIMS, escape_leading_slashes from django.utils.regex_helper import normalize from django.utils.translation import get_language from .converters import get_converter from .exceptions import NoReverseMatch, Resolver404 from .utils import get_callable class ResolverMatch: def __init__(self, func, args, kwargs, url_name=None, app_names=None, namespaces=None, route=None): self.func = func self.args = args self.kwargs = kwargs self.url_name = url_name self.route = route # If a URLRegexResolver doesn't have a namespace or app_name, it passes # in an empty value. self.app_names = [x for x in app_names if x] if app_names else [] self.app_name = ':'.join(self.app_names) self.namespaces = [x for x in namespaces if x] if namespaces else [] self.namespace = ':'.join(self.namespaces) if not hasattr(func, '__name__'): # A class-based view self._func_path = func.__class__.__module__ + '.' + func.__class__.__name__ else: # A function-based view self._func_path = func.__module__ + '.' + func.__name__ view_path = url_name or self._func_path self.view_name = ':'.join(self.namespaces + [view_path]) def __getitem__(self, index): return (self.func, self.args, self.kwargs)[index] def __repr__(self): return "ResolverMatch(func=%s, args=%s, kwargs=%s, url_name=%s, app_names=%s, namespaces=%s, route=%s)" % ( self._func_path, self.args, self.kwargs, self.url_name, self.app_names, self.namespaces, self.route, ) @functools.lru_cache(maxsize=None) def get_resolver(urlconf=None): if urlconf is None: urlconf = settings.ROOT_URLCONF return URLResolver(RegexPattern(r'^/'), urlconf) @functools.lru_cache(maxsize=None) def get_ns_resolver(ns_pattern, resolver, converters): # Build a namespaced resolver for the given parent URLconf pattern. # This makes it possible to have captured parameters in the parent # URLconf pattern. pattern = RegexPattern(ns_pattern) pattern.converters = dict(converters) ns_resolver = URLResolver(pattern, resolver.url_patterns) return URLResolver(RegexPattern(r'^/'), [ns_resolver]) class LocaleRegexDescriptor: def __init__(self, attr): self.attr = attr def __get__(self, instance, cls=None): """ Return a compiled regular expression based on the active language. """ if instance is None: return self # As a performance optimization, if the given regex string is a regular # string (not a lazily-translated string proxy), compile it once and # avoid per-language compilation. 
pattern = getattr(instance, self.attr) if isinstance(pattern, str): instance.__dict__['regex'] = instance._compile(pattern) return instance.__dict__['regex'] language_code = get_language() if language_code not in instance._regex_dict: instance._regex_dict[language_code] = instance._compile(str(pattern)) return instance._regex_dict[language_code] class CheckURLMixin: def describe(self): """ Format the URL pattern for display in warning messages. """ description = "'{}'".format(self) if self.name: description += " [name='{}']".format(self.name) return description def _check_pattern_startswith_slash(self): """ Check that the pattern does not begin with a forward slash. """ regex_pattern = self.regex.pattern if not settings.APPEND_SLASH: # Skip check as it can be useful to start a URL pattern with a slash # when APPEND_SLASH=False. return [] if regex_pattern.startswith(('/', '^/', '^\\/')) and not regex_pattern.endswith('/'): warning = Warning( "Your URL pattern {} has a route beginning with a '/'. Remove this " "slash as it is unnecessary. If this pattern is targeted in an " "include(), ensure the include() pattern has a trailing '/'.".format( self.describe() ), id="urls.W002", ) return [warning] else: return [] class RegexPattern(CheckURLMixin): regex = LocaleRegexDescriptor('_regex') def __init__(self, regex, name=None, is_endpoint=False): self._regex = regex self._regex_dict = {} self._is_endpoint = is_endpoint self.name = name self.converters = {} def match(self, path): match = self.regex.search(path) if match: # If there are any named groups, use those as kwargs, ignoring # non-named groups. Otherwise, pass all non-named arguments as # positional arguments. kwargs = match.groupdict() args = () if kwargs else match.groups() return path[match.end():], args, kwargs return None def check(self): warnings = [] warnings.extend(self._check_pattern_startswith_slash()) if not self._is_endpoint: warnings.extend(self._check_include_trailing_dollar()) return warnings def _check_include_trailing_dollar(self): regex_pattern = self.regex.pattern if regex_pattern.endswith('$') and not regex_pattern.endswith(r'\$'): return [Warning( "Your URL pattern {} uses include with a route ending with a '$'. " "Remove the dollar from the route to avoid problems including " "URLs.".format(self.describe()), id='urls.W001', )] else: return [] def _compile(self, regex): """Compile and return the given regular expression.""" try: return re.compile(regex) except re.error as e: raise ImproperlyConfigured( '"%s" is not a valid regular expression: %s' % (regex, e) ) def __str__(self): return str(self._regex) _PATH_PARAMETER_COMPONENT_RE = re.compile( r'<(?:(?P<converter>[^>:]+):)?(?P<parameter>\w+)>' ) def _route_to_regex(route, is_endpoint=False): """ Convert a path pattern into a regular expression. Return the regular expression and a dictionary mapping the capture names to the converters. For example, 'foo/<int:pk>' returns '^foo\\/(?P<pk>[0-9]+)' and {'pk': <django.urls.converters.IntConverter>}. """ original_route = route parts = ['^'] converters = {} while True: match = _PATH_PARAMETER_COMPONENT_RE.search(route) if not match: parts.append(re.escape(route)) break parts.append(re.escape(route[:match.start()])) route = route[match.end():] parameter = match.group('parameter') if not parameter.isidentifier(): raise ImproperlyConfigured( "URL route '%s' uses parameter name %r which isn't a valid " "Python identifier." 
% (original_route, parameter) ) raw_converter = match.group('converter') if raw_converter is None: # If a converter isn't specified, the default is `str`. raw_converter = 'str' try: converter = get_converter(raw_converter) except KeyError as e: raise ImproperlyConfigured( "URL route '%s' uses invalid converter %s." % (original_route, e) ) converters[parameter] = converter parts.append('(?P<' + parameter + '>' + converter.regex + ')') if is_endpoint: parts.append('$') return ''.join(parts), converters class RoutePattern(CheckURLMixin): regex = LocaleRegexDescriptor('_route') def __init__(self, route, name=None, is_endpoint=False): self._route = route self._regex_dict = {} self._is_endpoint = is_endpoint self.name = name self.converters = _route_to_regex(str(route), is_endpoint)[1] def match(self, path): match = self.regex.search(path) if match: # RoutePattern doesn't allow non-named groups so args are ignored. kwargs = match.groupdict() for key, value in kwargs.items(): converter = self.converters[key] try: kwargs[key] = converter.to_python(value) except ValueError: return None return path[match.end():], (), kwargs return None def check(self): warnings = self._check_pattern_startswith_slash() route = self._route if '(?P<' in route or route.startswith('^') or route.endswith('$'): warnings.append(Warning( "Your URL pattern {} has a route that contains '(?P<', begins " "with a '^', or ends with a '$'. This was likely an oversight " "when migrating to django.urls.path().".format(self.describe()), id='2_0.W001', )) return warnings def _compile(self, route): return re.compile(_route_to_regex(route, self._is_endpoint)[0]) def __str__(self): return str(self._route) class LocalePrefixPattern: def __init__(self, prefix_default_language=True): self.prefix_default_language = prefix_default_language self.converters = {} @property def regex(self): # This is only used by reverse() and cached in _reverse_dict. return re.compile(self.language_prefix) @property def language_prefix(self): language_code = get_language() or settings.LANGUAGE_CODE if language_code == settings.LANGUAGE_CODE and not self.prefix_default_language: return '' else: return '%s/' % language_code def match(self, path): language_prefix = self.language_prefix if path.startswith(language_prefix): return path[len(language_prefix):], (), {} return None def check(self): return [] def describe(self): return "'{}'".format(self) def __str__(self): return self.language_prefix class URLPattern: def __init__(self, pattern, callback, default_args=None, name=None): self.pattern = pattern self.callback = callback # the view self.default_args = default_args or {} self.name = name def __repr__(self): return '<%s %s>' % (self.__class__.__name__, self.pattern.describe()) def check(self): warnings = self._check_pattern_name() warnings.extend(self.pattern.check()) return warnings def _check_pattern_name(self): """ Check that the pattern name does not contain a colon. """ if self.pattern.name is not None and ":" in self.pattern.name: warning = Warning( "Your URL pattern {} has a name including a ':'. Remove the colon, to " "avoid ambiguous namespace references.".format(self.pattern.describe()), id="urls.W003", ) return [warning] else: return [] def resolve(self, path): match = self.pattern.match(path) if match: new_path, args, kwargs = match # Pass any extra_kwargs as **kwargs. 
kwargs.update(self.default_args) return ResolverMatch(self.callback, args, kwargs, self.pattern.name, route=str(self.pattern)) @cached_property def lookup_str(self): """ A string that identifies the view (e.g. 'path.to.view_function' or 'path.to.ClassBasedView'). """ callback = self.callback if isinstance(callback, functools.partial): callback = callback.func if not hasattr(callback, '__name__'): return callback.__module__ + "." + callback.__class__.__name__ return callback.__module__ + "." + callback.__qualname__ class URLResolver: def __init__(self, pattern, urlconf_name, default_kwargs=None, app_name=None, namespace=None): self.pattern = pattern # urlconf_name is the dotted Python path to the module defining # urlpatterns. It may also be an object with an urlpatterns attribute # or urlpatterns itself. self.urlconf_name = urlconf_name self.callback = None self.default_kwargs = default_kwargs or {} self.namespace = namespace self.app_name = app_name self._reverse_dict = {} self._namespace_dict = {} self._app_dict = {} # set of dotted paths to all functions and classes that are used in # urlpatterns self._callback_strs = set() self._populated = False self._local = Local() def __repr__(self): if isinstance(self.urlconf_name, list) and self.urlconf_name: # Don't bother to output the whole list, it can be huge urlconf_repr = '<%s list>' % self.urlconf_name[0].__class__.__name__ else: urlconf_repr = repr(self.urlconf_name) return '<%s %s (%s:%s) %s>' % ( self.__class__.__name__, urlconf_repr, self.app_name, self.namespace, self.pattern.describe(), ) def check(self): messages = [] for pattern in self.url_patterns: messages.extend(check_resolver(pattern)) messages.extend(self._check_custom_error_handlers()) return messages or self.pattern.check() def _check_custom_error_handlers(self): messages = [] # All handlers take (request, exception) arguments except handler500 # which takes (request). for status_code, num_parameters in [(400, 2), (403, 2), (404, 2), (500, 1)]: try: handler, param_dict = self.resolve_error_handler(status_code) except (ImportError, ViewDoesNotExist) as e: path = getattr(self.urlconf_module, 'handler%s' % status_code) msg = ( "The custom handler{status_code} view '{path}' could not be imported." ).format(status_code=status_code, path=path) messages.append(Error(msg, hint=str(e), id='urls.E008')) continue signature = inspect.signature(handler) args = [None] * num_parameters try: signature.bind(*args) except TypeError: msg = ( "The custom handler{status_code} view '{path}' does not " "take the correct number of arguments ({args})." ).format( status_code=status_code, path=handler.__module__ + '.' + handler.__qualname__, args='request, exception' if num_parameters == 2 else 'request', ) messages.append(Error(msg, id='urls.E007')) return messages def _populate(self): # Short-circuit if called recursively in this thread to prevent # infinite recursion. Concurrent threads may call this at the same # time and will need to continue, so set 'populating' on a # thread-local variable. 
if getattr(self._local, 'populating', False): return try: self._local.populating = True lookups = MultiValueDict() namespaces = {} apps = {} language_code = get_language() for url_pattern in reversed(self.url_patterns): p_pattern = url_pattern.pattern.regex.pattern if p_pattern.startswith('^'): p_pattern = p_pattern[1:] if isinstance(url_pattern, URLPattern): self._callback_strs.add(url_pattern.lookup_str) bits = normalize(url_pattern.pattern.regex.pattern) lookups.appendlist( url_pattern.callback, (bits, p_pattern, url_pattern.default_args, url_pattern.pattern.converters) ) if url_pattern.name is not None: lookups.appendlist( url_pattern.name, (bits, p_pattern, url_pattern.default_args, url_pattern.pattern.converters) ) else: # url_pattern is a URLResolver. url_pattern._populate() if url_pattern.app_name: apps.setdefault(url_pattern.app_name, []).append(url_pattern.namespace) namespaces[url_pattern.namespace] = (p_pattern, url_pattern) else: for name in url_pattern.reverse_dict: for matches, pat, defaults, converters in url_pattern.reverse_dict.getlist(name): new_matches = normalize(p_pattern + pat) lookups.appendlist( name, ( new_matches, p_pattern + pat, {**defaults, **url_pattern.default_kwargs}, {**self.pattern.converters, **url_pattern.pattern.converters, **converters} ) ) for namespace, (prefix, sub_pattern) in url_pattern.namespace_dict.items(): current_converters = url_pattern.pattern.converters sub_pattern.pattern.converters.update(current_converters) namespaces[namespace] = (p_pattern + prefix, sub_pattern) for app_name, namespace_list in url_pattern.app_dict.items(): apps.setdefault(app_name, []).extend(namespace_list) self._callback_strs.update(url_pattern._callback_strs) self._namespace_dict[language_code] = namespaces self._app_dict[language_code] = apps self._reverse_dict[language_code] = lookups self._populated = True finally: self._local.populating = False @property def reverse_dict(self): language_code = get_language() if language_code not in self._reverse_dict: self._populate() return self._reverse_dict[language_code] @property def namespace_dict(self): language_code = get_language() if language_code not in self._namespace_dict: self._populate() return self._namespace_dict[language_code] @property def app_dict(self): language_code = get_language() if language_code not in self._app_dict: self._populate() return self._app_dict[language_code] @staticmethod def _join_route(route1, route2): """Join two routes, without the starting ^ in the second route.""" if not route1: return route2 if route2.startswith('^'): route2 = route2[1:] return route1 + route2 def _is_callback(self, name): if not self._populated: self._populate() return name in self._callback_strs def resolve(self, path): path = str(path) # path may be a reverse_lazy object tried = [] match = self.pattern.match(path) if match: new_path, args, kwargs = match for pattern in self.url_patterns: try: sub_match = pattern.resolve(new_path) except Resolver404 as e: sub_tried = e.args[0].get('tried') if sub_tried is not None: tried.extend([pattern] + t for t in sub_tried) else: tried.append([pattern]) else: if sub_match: # Merge captured arguments in match with submatch sub_match_dict = {**kwargs, **self.default_kwargs} # Update the sub_match_dict with the kwargs from the sub_match. sub_match_dict.update(sub_match.kwargs) # If there are *any* named groups, ignore all non-named groups. # Otherwise, pass all non-named arguments as positional arguments. 
sub_match_args = sub_match.args if not sub_match_dict: sub_match_args = args + sub_match.args current_route = '' if isinstance(pattern, URLPattern) else str(pattern.pattern) return ResolverMatch( sub_match.func, sub_match_args, sub_match_dict, sub_match.url_name, [self.app_name] + sub_match.app_names, [self.namespace] + sub_match.namespaces, self._join_route(current_route, sub_match.route), ) tried.append([pattern]) raise Resolver404({'tried': tried, 'path': new_path}) raise Resolver404({'path': path}) @cached_property def urlconf_module(self): if isinstance(self.urlconf_name, str): return import_module(self.urlconf_name) else: return self.urlconf_name @cached_property def url_patterns(self): # urlconf_module might be a valid set of patterns, so we default to it patterns = getattr(self.urlconf_module, "urlpatterns", self.urlconf_module) try: iter(patterns) except TypeError: msg = ( "The included URLconf '{name}' does not appear to have any " "patterns in it. If you see valid patterns in the file then " "the issue is probably caused by a circular import." ) raise ImproperlyConfigured(msg.format(name=self.urlconf_name)) return patterns def resolve_error_handler(self, view_type): callback = getattr(self.urlconf_module, 'handler%s' % view_type, None) if not callback: # No handler specified in file; use lazy import, since # django.conf.urls imports this file. from django.conf import urls callback = getattr(urls, 'handler%s' % view_type) return get_callable(callback), {} def reverse(self, lookup_view, *args, **kwargs): return self._reverse_with_prefix(lookup_view, '', *args, **kwargs) def _reverse_with_prefix(self, lookup_view, _prefix, *args, **kwargs): if args and kwargs: raise ValueError("Don't mix *args and **kwargs in call to reverse()!") if not self._populated: self._populate() possibilities = self.reverse_dict.getlist(lookup_view) for possibility, pattern, defaults, converters in possibilities: for result, params in possibility: if args: if len(args) != len(params): continue candidate_subs = dict(zip(params, args)) else: if set(kwargs).symmetric_difference(params).difference(defaults): continue if any(kwargs.get(k, v) != v for k, v in defaults.items()): continue candidate_subs = kwargs # Convert the candidate subs to text using Converter.to_url(). text_candidate_subs = {} for k, v in candidate_subs.items(): if k in converters: text_candidate_subs[k] = converters[k].to_url(v) else: text_candidate_subs[k] = str(v) # WSGI provides decoded URLs, without %xx escapes, and the URL # resolver operates on such URLs. First substitute arguments # without quoting to build a decoded URL and look for a match. # Then, if we have a match, redo the substitution with quoted # arguments in order to return a properly encoded URL. candidate_pat = _prefix.replace('%', '%%') + result if re.search('^%s%s' % (re.escape(_prefix), pattern), candidate_pat % text_candidate_subs): # safe characters from `pchar` definition of RFC 3986 url = quote(candidate_pat % text_candidate_subs, safe=RFC3986_SUBDELIMS + '/~:@') # Don't allow construction of scheme relative urls. return escape_leading_slashes(url) # lookup_view can be URL name or callable, but callables are not # friendly in error messages. 
m = getattr(lookup_view, '__module__', None) n = getattr(lookup_view, '__name__', None) if m is not None and n is not None: lookup_view_s = "%s.%s" % (m, n) else: lookup_view_s = lookup_view patterns = [pattern for (_, pattern, _, _) in possibilities] if patterns: if args: arg_msg = "arguments '%s'" % (args,) elif kwargs: arg_msg = "keyword arguments '%s'" % (kwargs,) else: arg_msg = "no arguments" msg = ( "Reverse for '%s' with %s not found. %d pattern(s) tried: %s" % (lookup_view_s, arg_msg, len(patterns), patterns) ) else: msg = ( "Reverse for '%(view)s' not found. '%(view)s' is not " "a valid view function or pattern name." % {'view': lookup_view_s} ) raise NoReverseMatch(msg)
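# Illustrative sketch (not part of this module; run with
# `python -m django.urls.resolvers` so the relative imports above resolve):
# what _route_to_regex() produces for a typical path() route. The route
# string is an assumption for the example; the default converters need no
# configured settings.
if __name__ == '__main__':
    regex, converters = _route_to_regex('articles/<int:year>/', is_endpoint=True)
    match = re.match(regex, 'articles/2003/')
    assert match is not None and match.group('year') == '2003'
    # The converter turns the captured text into the value the view receives.
    assert converters['year'].to_python('2003') == 2003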
""" Helper functions for creating Form classes from Django models and database field objects. """ from itertools import chain from django.core.exceptions import ( NON_FIELD_ERRORS, FieldError, ImproperlyConfigured, ValidationError, ) from django.forms.fields import ChoiceField, Field from django.forms.forms import BaseForm, DeclarativeFieldsMetaclass from django.forms.formsets import BaseFormSet, formset_factory from django.forms.utils import ErrorList from django.forms.widgets import ( HiddenInput, MultipleHiddenInput, SelectMultiple, ) from django.utils.text import capfirst, get_text_list from django.utils.translation import gettext, gettext_lazy as _ __all__ = ( 'ModelForm', 'BaseModelForm', 'model_to_dict', 'fields_for_model', 'ModelChoiceField', 'ModelMultipleChoiceField', 'ALL_FIELDS', 'BaseModelFormSet', 'modelformset_factory', 'BaseInlineFormSet', 'inlineformset_factory', 'modelform_factory', ) ALL_FIELDS = '__all__' def construct_instance(form, instance, fields=None, exclude=None): """ Construct and return a model instance from the bound ``form``'s ``cleaned_data``, but do not save the returned instance to the database. """ from django.db import models opts = instance._meta cleaned_data = form.cleaned_data file_field_list = [] for f in opts.fields: if not f.editable or isinstance(f, models.AutoField) \ or f.name not in cleaned_data: continue if fields is not None and f.name not in fields: continue if exclude and f.name in exclude: continue # Leave defaults for fields that aren't in POST data, except for # checkbox inputs because they don't appear in POST data if not checked. if ( f.has_default() and form[f.name].field.widget.value_omitted_from_data(form.data, form.files, form.add_prefix(f.name)) and cleaned_data.get(f.name) in form[f.name].field.empty_values ): continue # Defer saving file-type fields until after the other fields, so a # callable upload_to can use the values from other fields. if isinstance(f, models.FileField): file_field_list.append(f) else: f.save_form_data(instance, cleaned_data[f.name]) for f in file_field_list: f.save_form_data(instance, cleaned_data[f.name]) return instance # ModelForms ################################################################# def model_to_dict(instance, fields=None, exclude=None): """ Return a dict containing the data in ``instance`` suitable for passing as a Form's ``initial`` keyword argument. ``fields`` is an optional list of field names. If provided, return only the named. ``exclude`` is an optional list of field names. If provided, exclude the named from the returned dict, even if they are listed in the ``fields`` argument. 
""" opts = instance._meta data = {} for f in chain(opts.concrete_fields, opts.private_fields, opts.many_to_many): if not getattr(f, 'editable', False): continue if fields is not None and f.name not in fields: continue if exclude and f.name in exclude: continue data[f.name] = f.value_from_object(instance) return data def apply_limit_choices_to_to_formfield(formfield): """Apply limit_choices_to to the formfield's queryset if needed.""" if hasattr(formfield, 'queryset') and hasattr(formfield, 'get_limit_choices_to'): limit_choices_to = formfield.get_limit_choices_to() if limit_choices_to is not None: formfield.queryset = formfield.queryset.complex_filter(limit_choices_to) def fields_for_model(model, fields=None, exclude=None, widgets=None, formfield_callback=None, localized_fields=None, labels=None, help_texts=None, error_messages=None, field_classes=None, *, apply_limit_choices_to=True): """ Return a dictionary containing form fields for the given model. ``fields`` is an optional list of field names. If provided, return only the named fields. ``exclude`` is an optional list of field names. If provided, exclude the named fields from the returned fields, even if they are listed in the ``fields`` argument. ``widgets`` is a dictionary of model field names mapped to a widget. ``formfield_callback`` is a callable that takes a model field and returns a form field. ``localized_fields`` is a list of names of fields which should be localized. ``labels`` is a dictionary of model field names mapped to a label. ``help_texts`` is a dictionary of model field names mapped to a help text. ``error_messages`` is a dictionary of model field names mapped to a dictionary of error messages. ``field_classes`` is a dictionary of model field names mapped to a form field class. ``apply_limit_choices_to`` is a boolean indicating if limit_choices_to should be applied to a field's queryset. 
""" field_dict = {} ignored = [] opts = model._meta # Avoid circular import from django.db.models.fields import Field as ModelField sortable_private_fields = [f for f in opts.private_fields if isinstance(f, ModelField)] for f in sorted(chain(opts.concrete_fields, sortable_private_fields, opts.many_to_many)): if not getattr(f, 'editable', False): if (fields is not None and f.name in fields and (exclude is None or f.name not in exclude)): raise FieldError( "'%s' cannot be specified for %s model form as it is a non-editable field" % ( f.name, model.__name__) ) continue if fields is not None and f.name not in fields: continue if exclude and f.name in exclude: continue kwargs = {} if widgets and f.name in widgets: kwargs['widget'] = widgets[f.name] if localized_fields == ALL_FIELDS or (localized_fields and f.name in localized_fields): kwargs['localize'] = True if labels and f.name in labels: kwargs['label'] = labels[f.name] if help_texts and f.name in help_texts: kwargs['help_text'] = help_texts[f.name] if error_messages and f.name in error_messages: kwargs['error_messages'] = error_messages[f.name] if field_classes and f.name in field_classes: kwargs['form_class'] = field_classes[f.name] if formfield_callback is None: formfield = f.formfield(**kwargs) elif not callable(formfield_callback): raise TypeError('formfield_callback must be a function or callable') else: formfield = formfield_callback(f, **kwargs) if formfield: if apply_limit_choices_to: apply_limit_choices_to_to_formfield(formfield) field_dict[f.name] = formfield else: ignored.append(f.name) if fields: field_dict = { f: field_dict.get(f) for f in fields if (not exclude or f not in exclude) and f not in ignored } return field_dict class ModelFormOptions: def __init__(self, options=None): self.model = getattr(options, 'model', None) self.fields = getattr(options, 'fields', None) self.exclude = getattr(options, 'exclude', None) self.widgets = getattr(options, 'widgets', None) self.localized_fields = getattr(options, 'localized_fields', None) self.labels = getattr(options, 'labels', None) self.help_texts = getattr(options, 'help_texts', None) self.error_messages = getattr(options, 'error_messages', None) self.field_classes = getattr(options, 'field_classes', None) class ModelFormMetaclass(DeclarativeFieldsMetaclass): def __new__(mcs, name, bases, attrs): base_formfield_callback = None for b in bases: if hasattr(b, 'Meta') and hasattr(b.Meta, 'formfield_callback'): base_formfield_callback = b.Meta.formfield_callback break formfield_callback = attrs.pop('formfield_callback', base_formfield_callback) new_class = super(ModelFormMetaclass, mcs).__new__(mcs, name, bases, attrs) if bases == (BaseModelForm,): return new_class opts = new_class._meta = ModelFormOptions(getattr(new_class, 'Meta', None)) # We check if a string was passed to `fields` or `exclude`, # which is likely to be a mistake where the user typed ('foo') instead # of ('foo',) for opt in ['fields', 'exclude', 'localized_fields']: value = getattr(opts, opt) if isinstance(value, str) and value != ALL_FIELDS: msg = ("%(model)s.Meta.%(opt)s cannot be a string. " "Did you mean to type: ('%(value)s',)?" % { 'model': new_class.__name__, 'opt': opt, 'value': value, }) raise TypeError(msg) if opts.model: # If a model is defined, extract form fields from it. if opts.fields is None and opts.exclude is None: raise ImproperlyConfigured( "Creating a ModelForm without either the 'fields' attribute " "or the 'exclude' attribute is prohibited; form %s " "needs updating." 
% name ) if opts.fields == ALL_FIELDS: # Sentinel for fields_for_model to indicate "get the list of # fields from the model" opts.fields = None fields = fields_for_model( opts.model, opts.fields, opts.exclude, opts.widgets, formfield_callback, opts.localized_fields, opts.labels, opts.help_texts, opts.error_messages, opts.field_classes, # limit_choices_to will be applied during ModelForm.__init__(). apply_limit_choices_to=False, ) # make sure opts.fields doesn't specify an invalid field none_model_fields = {k for k, v in fields.items() if not v} missing_fields = none_model_fields.difference(new_class.declared_fields) if missing_fields: message = 'Unknown field(s) (%s) specified for %s' message = message % (', '.join(missing_fields), opts.model.__name__) raise FieldError(message) # Override default model fields with any custom declared ones # (plus, include all the other declared fields). fields.update(new_class.declared_fields) else: fields = new_class.declared_fields new_class.base_fields = fields return new_class class BaseModelForm(BaseForm): def __init__(self, data=None, files=None, auto_id='id_%s', prefix=None, initial=None, error_class=ErrorList, label_suffix=None, empty_permitted=False, instance=None, use_required_attribute=None, renderer=None): opts = self._meta if opts.model is None: raise ValueError('ModelForm has no model class specified.') if instance is None: # if we didn't get an instance, instantiate a new one self.instance = opts.model() object_data = {} else: self.instance = instance object_data = model_to_dict(instance, opts.fields, opts.exclude) # if initial was provided, it should override the values from instance if initial is not None: object_data.update(initial) # self._validate_unique will be set to True by BaseModelForm.clean(). # It is False by default so overriding self.clean() and failing to call # super will stop validate_unique from being called. self._validate_unique = False super().__init__( data, files, auto_id, prefix, object_data, error_class, label_suffix, empty_permitted, use_required_attribute=use_required_attribute, renderer=renderer, ) for formfield in self.fields.values(): apply_limit_choices_to_to_formfield(formfield) def _get_validation_exclusions(self): """ For backwards-compatibility, exclude several types of fields from model validation. See tickets #12507, #12521, #12553. """ exclude = [] # Build up a list of fields that should be excluded from model field # validation and unique checks. for f in self.instance._meta.fields: field = f.name # Exclude fields that aren't on the form. The developer may be # adding these values to the model after form validation. if field not in self.fields: exclude.append(f.name) # Don't perform model validation on fields that were defined # manually on the form and excluded via the ModelForm's Meta # class. See #12901. elif self._meta.fields and field not in self._meta.fields: exclude.append(f.name) elif self._meta.exclude and field in self._meta.exclude: exclude.append(f.name) # Exclude fields that failed form validation. There's no need for # the model fields to validate them as well. elif field in self._errors: exclude.append(f.name) # Exclude empty fields that are not required by the form, if the # underlying model field is required. This keeps the model field # from raising a required error. Note: don't exclude the field from # validation if the model field allows blanks. If it does, the blank # value may be included in a unique check, so cannot be excluded # from validation. 
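            # Concrete case (illustrative): a form field declared with
            # required=False over a model field that has blank=False. An
            # empty submission is excluded here so that full_clean() doesn't
            # raise "This field cannot be blank." for a value the form
            # deliberately allowed to be empty.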
else: form_field = self.fields[field] field_value = self.cleaned_data.get(field) if not f.blank and not form_field.required and field_value in form_field.empty_values: exclude.append(f.name) return exclude def clean(self): self._validate_unique = True return self.cleaned_data def _update_errors(self, errors): # Override any validation error messages defined at the model level # with those defined at the form level. opts = self._meta # Allow the model generated by construct_instance() to raise # ValidationError and have them handled in the same way as others. if hasattr(errors, 'error_dict'): error_dict = errors.error_dict else: error_dict = {NON_FIELD_ERRORS: errors} for field, messages in error_dict.items(): if (field == NON_FIELD_ERRORS and opts.error_messages and NON_FIELD_ERRORS in opts.error_messages): error_messages = opts.error_messages[NON_FIELD_ERRORS] elif field in self.fields: error_messages = self.fields[field].error_messages else: continue for message in messages: if (isinstance(message, ValidationError) and message.code in error_messages): message.message = error_messages[message.code] self.add_error(None, errors) def _post_clean(self): opts = self._meta exclude = self._get_validation_exclusions() # Foreign Keys being used to represent inline relationships # are excluded from basic field value validation. This is for two # reasons: firstly, the value may not be supplied (#12507; the # case of providing new values to the admin); secondly the # object being referred to may not yet fully exist (#12749). # However, these fields *must* be included in uniqueness checks, # so this can't be part of _get_validation_exclusions(). for name, field in self.fields.items(): if isinstance(field, InlineForeignKeyField): exclude.append(name) try: self.instance = construct_instance(self, self.instance, opts.fields, opts.exclude) except ValidationError as e: self._update_errors(e) try: self.instance.full_clean(exclude=exclude, validate_unique=False) except ValidationError as e: self._update_errors(e) # Validate uniqueness if needed. if self._validate_unique: self.validate_unique() def validate_unique(self): """ Call the instance's validate_unique() method and update the form's validation errors if any were raised. """ exclude = self._get_validation_exclusions() try: self.instance.validate_unique(exclude=exclude) except ValidationError as e: self._update_errors(e) def _save_m2m(self): """ Save the many-to-many fields and generic relations for this form. """ cleaned_data = self.cleaned_data exclude = self._meta.exclude fields = self._meta.fields opts = self.instance._meta # Note that for historical reasons we want to include also # private_fields here. (GenericRelation was previously a fake # m2m field). for f in chain(opts.many_to_many, opts.private_fields): if not hasattr(f, 'save_form_data'): continue if fields and f.name not in fields: continue if exclude and f.name in exclude: continue if f.name in cleaned_data: f.save_form_data(self.instance, cleaned_data[f.name]) def save(self, commit=True): """ Save this form's self.instance object if commit=True. Otherwise, add a save_m2m() method to the form which can be called after the instance is saved manually at a later time. Return the model instance. """ if self.errors: raise ValueError( "The %s could not be %s because the data didn't validate." % ( self.instance._meta.object_name, 'created' if self.instance._state.adding else 'changed', ) ) if commit: # If committing, save the instance and the m2m data immediately. 
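            # Illustrative use of the two branches (hypothetical ArticleForm
            # assumed):
            #
            #     article = form.save(commit=False)  # nothing written yet
            #     article.owner = request.user
            #     article.save()
            #     form.save_m2m()  # deferred m2m save, added by the else branch
            #
            # With the default commit=True, both steps happen immediately: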
self.instance.save() self._save_m2m() else: # If not committing, add a method to the form to allow deferred # saving of m2m data. self.save_m2m = self._save_m2m return self.instance save.alters_data = True class ModelForm(BaseModelForm, metaclass=ModelFormMetaclass): pass def modelform_factory(model, form=ModelForm, fields=None, exclude=None, formfield_callback=None, widgets=None, localized_fields=None, labels=None, help_texts=None, error_messages=None, field_classes=None): """ Return a ModelForm containing form fields for the given model. You can optionally pass a `form` argument to use as a starting point for constructing the ModelForm. ``fields`` is an optional list of field names. If provided, include only the named fields in the returned fields. If omitted or '__all__', use all fields. ``exclude`` is an optional list of field names. If provided, exclude the named fields from the returned fields, even if they are listed in the ``fields`` argument. ``widgets`` is a dictionary of model field names mapped to a widget. ``localized_fields`` is a list of names of fields which should be localized. ``formfield_callback`` is a callable that takes a model field and returns a form field. ``labels`` is a dictionary of model field names mapped to a label. ``help_texts`` is a dictionary of model field names mapped to a help text. ``error_messages`` is a dictionary of model field names mapped to a dictionary of error messages. ``field_classes`` is a dictionary of model field names mapped to a form field class. """ # Create the inner Meta class. FIXME: ideally, we should be able to # construct a ModelForm without creating and passing in a temporary # inner class. # Build up a list of attributes that the Meta object will have. attrs = {'model': model} if fields is not None: attrs['fields'] = fields if exclude is not None: attrs['exclude'] = exclude if widgets is not None: attrs['widgets'] = widgets if localized_fields is not None: attrs['localized_fields'] = localized_fields if labels is not None: attrs['labels'] = labels if help_texts is not None: attrs['help_texts'] = help_texts if error_messages is not None: attrs['error_messages'] = error_messages if field_classes is not None: attrs['field_classes'] = field_classes # If parent form class already has an inner Meta, the Meta we're # creating needs to inherit from the parent's inner meta. bases = (form.Meta,) if hasattr(form, 'Meta') else () Meta = type('Meta', bases, attrs) if formfield_callback: Meta.formfield_callback = staticmethod(formfield_callback) # Give this new form class a reasonable name. class_name = model.__name__ + 'Form' # Class attributes for the new form class. form_class_attrs = { 'Meta': Meta, 'formfield_callback': formfield_callback } if (getattr(Meta, 'fields', None) is None and getattr(Meta, 'exclude', None) is None): raise ImproperlyConfigured( "Calling modelform_factory without defining 'fields' or " "'exclude' explicitly is prohibited." ) # Instantiate type(form) in order to use the same metaclass as form. return type(form)(class_name, (form,), form_class_attrs) # ModelFormSets ############################################################## class BaseModelFormSet(BaseFormSet): """ A ``FormSet`` for editing a queryset and/or adding new objects to it. """ model = None # Set of fields that must be unique among forms of this set. 
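    # For BaseInlineFormSet (below) this becomes {self.fk.name}, so the
    # parent link is treated as a fixed value rather than user-posted data
    # in validate_unique(). A concrete subclass is normally built with
    # modelformset_factory(); a minimal sketch, assuming a hypothetical
    # Author model:
    #
    #     AuthorFormSet = modelformset_factory(Author, fields=('name',))
    #     formset = AuthorFormSet(queryset=Author.objects.all())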
unique_fields = set() def __init__(self, data=None, files=None, auto_id='id_%s', prefix=None, queryset=None, *, initial=None, **kwargs): self.queryset = queryset self.initial_extra = initial super().__init__(**{'data': data, 'files': files, 'auto_id': auto_id, 'prefix': prefix, **kwargs}) def initial_form_count(self): """Return the number of forms that are required in this FormSet.""" if not self.is_bound: return len(self.get_queryset()) return super().initial_form_count() def _existing_object(self, pk): if not hasattr(self, '_object_dict'): self._object_dict = {o.pk: o for o in self.get_queryset()} return self._object_dict.get(pk) def _get_to_python(self, field): """ If the field is a related field, fetch the concrete field's (that is, the ultimate pointed-to field's) to_python. """ while field.remote_field is not None: field = field.remote_field.get_related_field() return field.to_python def _construct_form(self, i, **kwargs): pk_required = i < self.initial_form_count() if pk_required: if self.is_bound: pk_key = '%s-%s' % (self.add_prefix(i), self.model._meta.pk.name) try: pk = self.data[pk_key] except KeyError: # The primary key is missing. The user may have tampered # with POST data. pass else: to_python = self._get_to_python(self.model._meta.pk) try: pk = to_python(pk) except ValidationError: # The primary key exists but is an invalid value. The # user may have tampered with POST data. pass else: kwargs['instance'] = self._existing_object(pk) else: kwargs['instance'] = self.get_queryset()[i] elif self.initial_extra: # Set initial values for extra forms try: kwargs['initial'] = self.initial_extra[i - self.initial_form_count()] except IndexError: pass form = super()._construct_form(i, **kwargs) if pk_required: form.fields[self.model._meta.pk.name].required = True return form def get_queryset(self): if not hasattr(self, '_queryset'): if self.queryset is not None: qs = self.queryset else: qs = self.model._default_manager.get_queryset() # If the queryset isn't already ordered we need to add an # artificial ordering here to make sure that all formsets # constructed from this queryset have the same form order. if not qs.ordered: qs = qs.order_by(self.model._meta.pk.name) # Removed queryset limiting here. As per discussion re: #13023 # on django-dev, max_num should not prevent existing # related objects/inlines from being displayed. self._queryset = qs return self._queryset def save_new(self, form, commit=True): """Save and return a new model instance for the given form.""" return form.save(commit=commit) def save_existing(self, form, instance, commit=True): """Save and return an existing model instance for the given form.""" return form.save(commit=commit) def delete_existing(self, obj, commit=True): """Deletes an existing model instance.""" if commit: obj.delete() def save(self, commit=True): """ Save model instances for every form, adding and changing instances as necessary, and return the list of instances. """ if not commit: self.saved_forms = [] def save_m2m(): for form in self.saved_forms: form.save_m2m() self.save_m2m = save_m2m return self.save_existing_objects(commit) + self.save_new_objects(commit) save.alters_data = True def clean(self): self.validate_unique() def validate_unique(self): # Collect unique_checks and date_checks to run from all the forms. 
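        # Each unique check is a (model_class, field_names) pair, e.g.
        # (Article, ('slug',)) for a unique=True field or
        # (Article, ('journal', 'volume')) for Meta.unique_together; each
        # date check is a (model_class, lookup, field, unique_for) tuple
        # produced by unique_for_date/month/year. (Article is a hypothetical
        # model used for illustration.)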
all_unique_checks = set() all_date_checks = set() forms_to_delete = self.deleted_forms valid_forms = [form for form in self.forms if form.is_valid() and form not in forms_to_delete] for form in valid_forms: exclude = form._get_validation_exclusions() unique_checks, date_checks = form.instance._get_unique_checks(exclude=exclude) all_unique_checks.update(unique_checks) all_date_checks.update(date_checks) errors = [] # Do each of the unique checks (unique and unique_together) for uclass, unique_check in all_unique_checks: seen_data = set() for form in valid_forms: # Get the data for the set of fields that must be unique among the forms. row_data = ( field if field in self.unique_fields else form.cleaned_data[field] for field in unique_check if field in form.cleaned_data ) # Reduce Model instances to their primary key values row_data = tuple( d._get_pk_val() if hasattr(d, '_get_pk_val') # Prevent "unhashable type: list" errors later on. else tuple(d) if isinstance(d, list) else d for d in row_data ) if row_data and None not in row_data: # if we've already seen it then we have a uniqueness failure if row_data in seen_data: # poke error messages into the right places and mark # the form as invalid errors.append(self.get_unique_error_message(unique_check)) form._errors[NON_FIELD_ERRORS] = self.error_class([self.get_form_error()]) # remove the data from the cleaned_data dict since it was invalid for field in unique_check: if field in form.cleaned_data: del form.cleaned_data[field] # mark the data as seen seen_data.add(row_data) # iterate over each of the date checks now for date_check in all_date_checks: seen_data = set() uclass, lookup, field, unique_for = date_check for form in valid_forms: # see if we have data for both fields if (form.cleaned_data and form.cleaned_data[field] is not None and form.cleaned_data[unique_for] is not None): # if it's a date lookup we need to get the data for all the fields if lookup == 'date': date = form.cleaned_data[unique_for] date_data = (date.year, date.month, date.day) # otherwise it's just the attribute on the date/datetime # object else: date_data = (getattr(form.cleaned_data[unique_for], lookup),) data = (form.cleaned_data[field],) + date_data # if we've already seen it then we have a uniqueness failure if data in seen_data: # poke error messages into the right places and mark # the form as invalid errors.append(self.get_date_error_message(date_check)) form._errors[NON_FIELD_ERRORS] = self.error_class([self.get_form_error()]) # remove the data from the cleaned_data dict since it was invalid del form.cleaned_data[field] # mark the data as seen seen_data.add(data) if errors: raise ValidationError(errors) def get_unique_error_message(self, unique_check): if len(unique_check) == 1: return gettext("Please correct the duplicate data for %(field)s.") % { "field": unique_check[0], } else: return gettext("Please correct the duplicate data for %(field)s, which must be unique.") % { "field": get_text_list(unique_check, _("and")), } def get_date_error_message(self, date_check): return gettext( "Please correct the duplicate data for %(field_name)s " "which must be unique for the %(lookup)s in %(date_field)s." 
) % { 'field_name': date_check[2], 'date_field': date_check[3], 'lookup': str(date_check[1]), } def get_form_error(self): return gettext("Please correct the duplicate values below.") def save_existing_objects(self, commit=True): self.changed_objects = [] self.deleted_objects = [] if not self.initial_forms: return [] saved_instances = [] forms_to_delete = self.deleted_forms for form in self.initial_forms: obj = form.instance # If the pk is None, it means either: # 1. The object is an unexpected empty model, created by invalid # POST data such as an object outside the formset's queryset. # 2. The object was already deleted from the database. if obj.pk is None: continue if form in forms_to_delete: self.deleted_objects.append(obj) self.delete_existing(obj, commit=commit) elif form.has_changed(): self.changed_objects.append((obj, form.changed_data)) saved_instances.append(self.save_existing(form, obj, commit=commit)) if not commit: self.saved_forms.append(form) return saved_instances def save_new_objects(self, commit=True): self.new_objects = [] for form in self.extra_forms: if not form.has_changed(): continue # If someone has marked an add form for deletion, don't save the # object. if self.can_delete and self._should_delete_form(form): continue self.new_objects.append(self.save_new(form, commit=commit)) if not commit: self.saved_forms.append(form) return self.new_objects def add_fields(self, form, index): """Add a hidden field for the object's primary key.""" from django.db.models import AutoField, OneToOneField, ForeignKey self._pk_field = pk = self.model._meta.pk # If a pk isn't editable, then it won't be on the form, so we need to # add it here so we can tell which object is which when we get the # data back. Generally, pk.editable should be false, but for some # reason, auto_created pk fields and AutoField's editable attribute is # True, so check for that as well. def pk_is_not_editable(pk): return ( (not pk.editable) or (pk.auto_created or isinstance(pk, AutoField)) or ( pk.remote_field and pk.remote_field.parent_link and pk_is_not_editable(pk.remote_field.model._meta.pk) ) ) if pk_is_not_editable(pk) or pk.name not in form.fields: if form.is_bound: # If we're adding the related instance, ignore its primary key # as it could be an auto-generated default which isn't actually # in the database. 
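                # For example, a pk declared as UUIDField(default=uuid.uuid4)
                # is populated at instantiation time, long before any row
                # exists, which is why _state.adding is checked here instead
                # of testing pk for None.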
pk_value = None if form.instance._state.adding else form.instance.pk else: try: if index is not None: pk_value = self.get_queryset()[index].pk else: pk_value = None except IndexError: pk_value = None if isinstance(pk, (ForeignKey, OneToOneField)): qs = pk.remote_field.model._default_manager.get_queryset() else: qs = self.model._default_manager.get_queryset() qs = qs.using(form.instance._state.db) if form._meta.widgets: widget = form._meta.widgets.get(self._pk_field.name, HiddenInput) else: widget = HiddenInput form.fields[self._pk_field.name] = ModelChoiceField(qs, initial=pk_value, required=False, widget=widget) super().add_fields(form, index) def modelformset_factory(model, form=ModelForm, formfield_callback=None, formset=BaseModelFormSet, extra=1, can_delete=False, can_order=False, max_num=None, fields=None, exclude=None, widgets=None, validate_max=False, localized_fields=None, labels=None, help_texts=None, error_messages=None, min_num=None, validate_min=False, field_classes=None): """Return a FormSet class for the given Django model class.""" meta = getattr(form, 'Meta', None) if (getattr(meta, 'fields', fields) is None and getattr(meta, 'exclude', exclude) is None): raise ImproperlyConfigured( "Calling modelformset_factory without defining 'fields' or " "'exclude' explicitly is prohibited." ) form = modelform_factory(model, form=form, fields=fields, exclude=exclude, formfield_callback=formfield_callback, widgets=widgets, localized_fields=localized_fields, labels=labels, help_texts=help_texts, error_messages=error_messages, field_classes=field_classes) FormSet = formset_factory(form, formset, extra=extra, min_num=min_num, max_num=max_num, can_order=can_order, can_delete=can_delete, validate_min=validate_min, validate_max=validate_max) FormSet.model = model return FormSet # InlineFormSets ############################################################# class BaseInlineFormSet(BaseModelFormSet): """A formset for child objects related to a parent.""" def __init__(self, data=None, files=None, instance=None, save_as_new=False, prefix=None, queryset=None, **kwargs): if instance is None: self.instance = self.fk.remote_field.model() else: self.instance = instance self.save_as_new = save_as_new if queryset is None: queryset = self.model._default_manager if self.instance.pk is not None: qs = queryset.filter(**{self.fk.name: self.instance}) else: qs = queryset.none() self.unique_fields = {self.fk.name} super().__init__(data, files, prefix=prefix, queryset=qs, **kwargs) # Add the generated field to form._meta.fields if it's defined to make # sure validation isn't skipped on that field. if self.form._meta.fields and self.fk.name not in self.form._meta.fields: if isinstance(self.form._meta.fields, tuple): self.form._meta.fields = list(self.form._meta.fields) self.form._meta.fields.append(self.fk.name) def initial_form_count(self): if self.save_as_new: return 0 return super().initial_form_count() def _construct_form(self, i, **kwargs): form = super()._construct_form(i, **kwargs) if self.save_as_new: mutable = getattr(form.data, '_mutable', None) # Allow modifying an immutable QueryDict. if mutable is not None: form.data._mutable = True # Remove the primary key from the form's data, we are only # creating new instances form.data[form.add_prefix(self._pk_field.name)] = None # Remove the foreign key from the form's data form.data[form.add_prefix(self.fk.name)] = None if mutable is not None: form.data._mutable = mutable # Set the fk value here so that the form can do its validation. 
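            # The ForeignKey may target a non-pk field via to_field, e.g.
            # (hypothetical models)
            #
            #     class Parent(models.Model):
            #         code = models.CharField(max_length=10, unique=True)
            #
            #     class Child(models.Model):
            #         parent = models.ForeignKey(Parent, models.CASCADE,
            #                                    to_field='code')
            #
            # in which case the value assigned below must come from
            # Parent.code, not Parent.pk.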
fk_value = self.instance.pk if self.fk.remote_field.field_name != self.fk.remote_field.model._meta.pk.name: fk_value = getattr(self.instance, self.fk.remote_field.field_name) fk_value = getattr(fk_value, 'pk', fk_value) setattr(form.instance, self.fk.get_attname(), fk_value) return form @classmethod def get_default_prefix(cls): return cls.fk.remote_field.get_accessor_name(model=cls.model).replace('+', '') def save_new(self, form, commit=True): # Ensure the latest copy of the related instance is present on each # form (it may have been saved after the formset was originally # instantiated). setattr(form.instance, self.fk.name, self.instance) return super().save_new(form, commit=commit) def add_fields(self, form, index): super().add_fields(form, index) if self._pk_field == self.fk: name = self._pk_field.name kwargs = {'pk_field': True} else: # The foreign key field might not be on the form, so we poke at the # Model field to get the label, since we need that for error messages. name = self.fk.name kwargs = { 'label': getattr(form.fields.get(name), 'label', capfirst(self.fk.verbose_name)) } # The InlineForeignKeyField assumes that the foreign key relation is # based on the parent model's pk. If this isn't the case, set to_field # to correctly resolve the initial form value. if self.fk.remote_field.field_name != self.fk.remote_field.model._meta.pk.name: kwargs['to_field'] = self.fk.remote_field.field_name # If we're adding a new object, ignore a parent's auto-generated key # as it will be regenerated on the save request. if self.instance._state.adding: if kwargs.get('to_field') is not None: to_field = self.instance._meta.get_field(kwargs['to_field']) else: to_field = self.instance._meta.pk if to_field.has_default(): setattr(self.instance, to_field.attname, None) form.fields[name] = InlineForeignKeyField(self.instance, **kwargs) def get_unique_error_message(self, unique_check): unique_check = [field for field in unique_check if field != self.fk.name] return super().get_unique_error_message(unique_check) def _get_foreign_key(parent_model, model, fk_name=None, can_fail=False): """ Find and return the ForeignKey from model to parent if there is one (return None if can_fail is True and no such field exists). If fk_name is provided, assume it is the name of the ForeignKey field. Unless can_fail is True, raise an exception if there isn't a ForeignKey from model to parent_model. """ # avoid circular import from django.db.models import ForeignKey opts = model._meta if fk_name: fks_to_parent = [f for f in opts.fields if f.name == fk_name] if len(fks_to_parent) == 1: fk = fks_to_parent[0] if not isinstance(fk, ForeignKey) or \ (fk.remote_field.model != parent_model and fk.remote_field.model not in parent_model._meta.get_parent_list()): raise ValueError( "fk_name '%s' is not a ForeignKey to '%s'." % (fk_name, parent_model._meta.label) ) elif not fks_to_parent: raise ValueError( "'%s' has no field named '%s'." % (model._meta.label, fk_name) ) else: # Try to discover what the ForeignKey from model to parent_model is fks_to_parent = [ f for f in opts.fields if isinstance(f, ForeignKey) and ( f.remote_field.model == parent_model or f.remote_field.model in parent_model._meta.get_parent_list() ) ] if len(fks_to_parent) == 1: fk = fks_to_parent[0] elif not fks_to_parent: if can_fail: return raise ValueError( "'%s' has no ForeignKey to '%s'." % ( model._meta.label, parent_model._meta.label, ) ) else: raise ValueError( "'%s' has more than one ForeignKey to '%s'." 
% ( model._meta.label, parent_model._meta.label, ) ) return fk def inlineformset_factory(parent_model, model, form=ModelForm, formset=BaseInlineFormSet, fk_name=None, fields=None, exclude=None, extra=3, can_order=False, can_delete=True, max_num=None, formfield_callback=None, widgets=None, validate_max=False, localized_fields=None, labels=None, help_texts=None, error_messages=None, min_num=None, validate_min=False, field_classes=None): """ Return an ``InlineFormSet`` for the given kwargs. ``fk_name`` must be provided if ``model`` has more than one ``ForeignKey`` to ``parent_model``. """ fk = _get_foreign_key(parent_model, model, fk_name=fk_name) # enforce a max_num=1 when the foreign key to the parent model is unique. if fk.unique: max_num = 1 kwargs = { 'form': form, 'formfield_callback': formfield_callback, 'formset': formset, 'extra': extra, 'can_delete': can_delete, 'can_order': can_order, 'fields': fields, 'exclude': exclude, 'min_num': min_num, 'max_num': max_num, 'widgets': widgets, 'validate_min': validate_min, 'validate_max': validate_max, 'localized_fields': localized_fields, 'labels': labels, 'help_texts': help_texts, 'error_messages': error_messages, 'field_classes': field_classes, } FormSet = modelformset_factory(model, **kwargs) FormSet.fk = fk return FormSet # Fields ##################################################################### class InlineForeignKeyField(Field): """ A basic integer field that deals with validating the given value to a given parent instance in an inline. """ widget = HiddenInput default_error_messages = { 'invalid_choice': _('The inline value did not match the parent instance.'), } def __init__(self, parent_instance, *args, pk_field=False, to_field=None, **kwargs): self.parent_instance = parent_instance self.pk_field = pk_field self.to_field = to_field if self.parent_instance is not None: if self.to_field: kwargs["initial"] = getattr(self.parent_instance, self.to_field) else: kwargs["initial"] = self.parent_instance.pk kwargs["required"] = False super().__init__(*args, **kwargs) def clean(self, value): if value in self.empty_values: if self.pk_field: return None # if there is no value act as we did before. return self.parent_instance # ensure the we compare the values as equal types. if self.to_field: orig = getattr(self.parent_instance, self.to_field) else: orig = self.parent_instance.pk if str(value) != str(orig): raise ValidationError(self.error_messages['invalid_choice'], code='invalid_choice') return self.parent_instance def has_changed(self, initial, data): return False class ModelChoiceIterator: def __init__(self, field): self.field = field self.queryset = field.queryset def __iter__(self): if self.field.empty_label is not None: yield ("", self.field.empty_label) queryset = self.queryset # Can't use iterator() when queryset uses prefetch_related() if not queryset._prefetch_related_lookups: queryset = queryset.iterator() for obj in queryset: yield self.choice(obj) def __len__(self): # count() adds a query but uses less memory since the QuerySet results # won't be cached. In most cases, the choices will only be iterated on, # and __len__() won't be called. 
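        # As a consequence, len(field.choices) costs one COUNT query, and a
        # subsequent iteration over the same choices runs its own SELECT.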
return self.queryset.count() + (1 if self.field.empty_label is not None else 0) def __bool__(self): return self.field.empty_label is not None or self.queryset.exists() def choice(self, obj): return (self.field.prepare_value(obj), self.field.label_from_instance(obj)) class ModelChoiceField(ChoiceField): """A ChoiceField whose choices are a model QuerySet.""" # This class is a subclass of ChoiceField for purity, but it doesn't # actually use any of ChoiceField's implementation. default_error_messages = { 'invalid_choice': _('Select a valid choice. That choice is not one of' ' the available choices.'), } iterator = ModelChoiceIterator def __init__(self, queryset, *, empty_label="---------", required=True, widget=None, label=None, initial=None, help_text='', to_field_name=None, limit_choices_to=None, **kwargs): if required and (initial is not None): self.empty_label = None else: self.empty_label = empty_label # Call Field instead of ChoiceField __init__() because we don't need # ChoiceField.__init__(). Field.__init__( self, required=required, widget=widget, label=label, initial=initial, help_text=help_text, **kwargs ) self.queryset = queryset self.limit_choices_to = limit_choices_to # limit the queryset later. self.to_field_name = to_field_name def get_limit_choices_to(self): """ Return ``limit_choices_to`` for this form field. If it is a callable, invoke it and return the result. """ if callable(self.limit_choices_to): return self.limit_choices_to() return self.limit_choices_to def __deepcopy__(self, memo): result = super(ChoiceField, self).__deepcopy__(memo) # Need to force a new ModelChoiceIterator to be created, bug #11183 if self.queryset is not None: result.queryset = self.queryset.all() return result def _get_queryset(self): return self._queryset def _set_queryset(self, queryset): self._queryset = None if queryset is None else queryset.all() self.widget.choices = self.choices queryset = property(_get_queryset, _set_queryset) # this method will be used to create object labels by the QuerySetIterator. # Override it to customize the label. def label_from_instance(self, obj): """ Convert objects into strings and generate the labels for the choices presented by this object. Subclasses can override this method to customize the display of the choices. """ return str(obj) def _get_choices(self): # If self._choices is set, then somebody must have manually set # the property self.choices. In this case, just return self._choices. if hasattr(self, '_choices'): return self._choices # Otherwise, execute the QuerySet in self.queryset to determine the # choices dynamically. Return a fresh ModelChoiceIterator that has not been # consumed. Note that we're instantiating a new ModelChoiceIterator *each* # time _get_choices() is called (and, thus, each time self.choices is # accessed) so that we can ensure the QuerySet has not been consumed. This # construct might look complicated but it allows for lazy evaluation of # the queryset. 
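        # Illustrative consequence (hypothetical Author model assumed):
        #
        #     field = ModelChoiceField(queryset=Author.objects.all())
        #     list(field.choices)  # runs the query now
        #     list(field.choices)  # a fresh iterator; evaluates again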
return self.iterator(self) choices = property(_get_choices, ChoiceField._set_choices) def prepare_value(self, value): if hasattr(value, '_meta'): if self.to_field_name: return value.serializable_value(self.to_field_name) else: return value.pk return super().prepare_value(value) def to_python(self, value): if value in self.empty_values: return None try: key = self.to_field_name or 'pk' value = self.queryset.get(**{key: value}) except (ValueError, TypeError, self.queryset.model.DoesNotExist): raise ValidationError(self.error_messages['invalid_choice'], code='invalid_choice') return value def validate(self, value): return Field.validate(self, value) def has_changed(self, initial, data): if self.disabled: return False initial_value = initial if initial is not None else '' data_value = data if data is not None else '' return str(self.prepare_value(initial_value)) != str(data_value) class ModelMultipleChoiceField(ModelChoiceField): """A MultipleChoiceField whose choices are a model QuerySet.""" widget = SelectMultiple hidden_widget = MultipleHiddenInput default_error_messages = { 'list': _('Enter a list of values.'), 'invalid_choice': _('Select a valid choice. %(value)s is not one of the' ' available choices.'), 'invalid_pk_value': _('"%(pk)s" is not a valid value.') } def __init__(self, queryset, **kwargs): super().__init__(queryset, empty_label=None, **kwargs) def to_python(self, value): if not value: return [] return list(self._check_values(value)) def clean(self, value): value = self.prepare_value(value) if self.required and not value: raise ValidationError(self.error_messages['required'], code='required') elif not self.required and not value: return self.queryset.none() if not isinstance(value, (list, tuple)): raise ValidationError(self.error_messages['list'], code='list') qs = self._check_values(value) # Since this overrides the inherited ModelChoiceField.clean # we run custom validators here self.run_validators(value) return qs def _check_values(self, value): """ Given a list of possible PK values, return a QuerySet of the corresponding objects. Raise a ValidationError if a given value is invalid (not a valid PK, not in the queryset, etc.) """ key = self.to_field_name or 'pk' # deduplicate given values to avoid creating many querysets or # requiring the database backend deduplicate efficiently. 
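        # e.g. value = ['1', '2', '2'] becomes frozenset({'1', '2'}), so the
        # __in lookup below sees each candidate key at most once.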
try: value = frozenset(value) except TypeError: # list of lists isn't hashable, for example raise ValidationError( self.error_messages['list'], code='list', ) for pk in value: try: self.queryset.filter(**{key: pk}) except (ValueError, TypeError): raise ValidationError( self.error_messages['invalid_pk_value'], code='invalid_pk_value', params={'pk': pk}, ) qs = self.queryset.filter(**{'%s__in' % key: value}) pks = {str(getattr(o, key)) for o in qs} for val in value: if str(val) not in pks: raise ValidationError( self.error_messages['invalid_choice'], code='invalid_choice', params={'value': val}, ) return qs def prepare_value(self, value): if (hasattr(value, '__iter__') and not isinstance(value, str) and not hasattr(value, '_meta')): prepare_value = super().prepare_value return [prepare_value(v) for v in value] return super().prepare_value(value) def has_changed(self, initial, data): if self.disabled: return False if initial is None: initial = [] if data is None: data = [] if len(initial) != len(data): return True initial_set = {str(value) for value in self.prepare_value(initial)} data_set = {str(value) for value in data} return data_set != initial_set def modelform_defines_fields(form_class): return hasattr(form_class, '_meta') and ( form_class._meta.fields is not None or form_class._meta.exclude is not None )
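# Illustrative end-to-end use of the factories above, kept as comments so
# the module stays importable. Author and Book are hypothetical models with
# a ForeignKey from Book to Author:
#
#     BookFormSet = inlineformset_factory(Author, Book, fields=('title',))
#     author = Author.objects.get(pk=1)
#     formset = BookFormSet(request.POST, instance=author)
#     if formset.is_valid():
#         formset.save()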
""" Create SQL statements for QuerySets. The code in here encapsulates all of the SQL construction so that QuerySets themselves do not have to (and could be backed by things other than SQL databases). The abstraction barrier only works one way: this module has to know all about the internals of models in order to get the information it needs. """ import difflib import functools import inspect import sys import warnings from collections import Counter, namedtuple from collections.abc import Iterator, Mapping from itertools import chain, count, product from string import ascii_uppercase from django.core.exceptions import ( EmptyResultSet, FieldDoesNotExist, FieldError, ) from django.db import DEFAULT_DB_ALIAS, NotSupportedError, connections from django.db.models.aggregates import Count from django.db.models.constants import LOOKUP_SEP from django.db.models.expressions import ( BaseExpression, Col, F, OuterRef, Ref, SimpleCol, ) from django.db.models.fields import Field from django.db.models.fields.related_lookups import MultiColSource from django.db.models.lookups import Lookup from django.db.models.query_utils import ( Q, check_rel_lookup_compatibility, refs_expression, ) from django.db.models.sql.constants import ( INNER, LOUTER, ORDER_DIR, ORDER_PATTERN, SINGLE, ) from django.db.models.sql.datastructures import ( BaseTable, Empty, Join, MultiJoin, ) from django.db.models.sql.where import ( AND, OR, ExtraWhere, NothingNode, WhereNode, ) from django.utils.deprecation import RemovedInDjango40Warning from django.utils.functional import cached_property from django.utils.tree import Node __all__ = ['Query', 'RawQuery'] def get_field_names_from_opts(opts): return set(chain.from_iterable( (f.name, f.attname) if f.concrete else (f.name,) for f in opts.get_fields() )) def get_children_from_q(q): for child in q.children: if isinstance(child, Node): yield from get_children_from_q(child) else: yield child JoinInfo = namedtuple( 'JoinInfo', ('final_field', 'targets', 'opts', 'joins', 'path', 'transform_function') ) def _get_col(target, field, alias, simple_col): if simple_col: return SimpleCol(target, field) return target.get_col(alias, field) class RawQuery: """A single raw SQL query.""" def __init__(self, sql, using, params=None): self.params = params or () self.sql = sql self.using = using self.cursor = None # Mirror some properties of a normal query so that # the compiler can be used to process results. self.low_mark, self.high_mark = 0, None # Used for offset/limit self.extra_select = {} self.annotation_select = {} def chain(self, using): return self.clone(using) def clone(self, using): return RawQuery(self.sql, using, params=self.params) def get_columns(self): if self.cursor is None: self._execute_query() converter = connections[self.using].introspection.identifier_converter return [converter(column_meta[0]) for column_meta in self.cursor.description] def __iter__(self): # Always execute a new query for a new iterator. # This could be optimized with a cache at the expense of RAM. self._execute_query() if not connections[self.using].features.can_use_chunked_reads: # If the database can't use chunked reads we need to make sure we # evaluate the entire query up front. 
result = list(self.cursor) else: result = self.cursor return iter(result) def __repr__(self): return "<%s: %s>" % (self.__class__.__name__, self) @property def params_type(self): return dict if isinstance(self.params, Mapping) else tuple def __str__(self): return self.sql % self.params_type(self.params) def _execute_query(self): connection = connections[self.using] # Adapt parameters to the database, as much as possible considering # that the target type isn't known. See #17755. params_type = self.params_type adapter = connection.ops.adapt_unknown_value if params_type is tuple: params = tuple(adapter(val) for val in self.params) elif params_type is dict: params = {key: adapter(val) for key, val in self.params.items()} else: raise RuntimeError("Unexpected params type: %s" % params_type) self.cursor = connection.cursor() self.cursor.execute(self.sql, params) class Query(BaseExpression): """A single SQL query.""" alias_prefix = 'T' subq_aliases = frozenset([alias_prefix]) compiler = 'SQLCompiler' def __init__(self, model, where=WhereNode): self.model = model self.alias_refcount = {} # alias_map is the most important data structure regarding joins. # It's used for recording which joins exist in the query and what # types they are. The key is the alias of the joined table (possibly # the table name) and the value is a Join-like object (see # sql.datastructures.Join for more information). self.alias_map = {} # Sometimes the query contains references to aliases in outer queries (as # a result of split_exclude). Correct alias quoting needs to know these # aliases too. self.external_aliases = set() self.table_map = {} # Maps table names to list of aliases. self.default_cols = True self.default_ordering = True self.standard_ordering = True self.used_aliases = set() self.filter_is_sticky = False self.subquery = False # SQL-related attributes # Select and related select clauses are expressions to use in the # SELECT clause of the query. # The select is used for cases where we want to set up the select # clause to contain other than default fields (values(), subqueries...) # Note that annotations go to annotations dictionary. self.select = () self.where = where() self.where_class = where # The group_by attribute can have one of the following forms: # - None: no group by at all in the query # - A tuple of expressions: group by (at least) those expressions. # String refs are also allowed for now. # - True: group by all select fields of the model # See compiler.get_group_by() for details. self.group_by = None self.order_by = () self.low_mark, self.high_mark = 0, None # Used for offset/limit self.distinct = False self.distinct_fields = () self.select_for_update = False self.select_for_update_nowait = False self.select_for_update_skip_locked = False self.select_for_update_of = () self.select_related = False # Arbitrary limit for select_related to prevents infinite recursion. self.max_depth = 5 # Holds the selects defined by a call to values() or values_list() # excluding annotation_select and extra_select. self.values_select = () # SQL annotation-related attributes self.annotations = {} # Maps alias -> Annotation Expression self.annotation_select_mask = None self._annotation_select_cache = None # Set combination attributes self.combinator = None self.combinator_all = False self.combined_queries = () # These are for extensions. The contents are more or less appended # verbatim to the appropriate clause. self.extra = {} # Maps col_alias -> (col_sql, params). 
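        # For example (illustrative), QuerySet.extra(
        #     select={'is_old': 'pub_date < %s'}, select_params=[cutoff])
        # lands here roughly as {'is_old': ('pub_date < %s', [cutoff])}.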
self.extra_select_mask = None self._extra_select_cache = None self.extra_tables = () self.extra_order_by = () # A tuple that is a set of model field names and either True, if these # are the fields to defer, or False if these are the only fields to # load. self.deferred_loading = (frozenset(), True) self._filtered_relations = {} self.explain_query = False self.explain_format = None self.explain_options = {} @property def output_field(self): if len(self.select) == 1: return self.select[0].field elif len(self.annotation_select) == 1: return next(iter(self.annotation_select.values())).output_field @property def has_select_fields(self): return bool(self.select or self.annotation_select_mask or self.extra_select_mask) @cached_property def base_table(self): for alias in self.alias_map: return alias def __str__(self): """ Return the query as a string of SQL with the parameter values substituted in (use sql_with_params() to see the unsubstituted string). Parameter values won't necessarily be quoted correctly, since that is done by the database interface at execution time. """ sql, params = self.sql_with_params() return sql % params def sql_with_params(self): """ Return the query as an SQL string and the parameters that will be substituted into the query. """ return self.get_compiler(DEFAULT_DB_ALIAS).as_sql() def __deepcopy__(self, memo): """Limit the amount of work when a Query is deepcopied.""" result = self.clone() memo[id(self)] = result return result def get_compiler(self, using=None, connection=None): if using is None and connection is None: raise ValueError("Need either using or connection") if using: connection = connections[using] return connection.ops.compiler(self.compiler)(self, connection, using) def get_meta(self): """ Return the Options instance (the model._meta) from which to start processing. Normally, this is self.model._meta, but it can be changed by subclasses. """ return self.model._meta def clone(self): """ Return a copy of the current Query. A lightweight alternative to to deepcopy(). """ obj = Empty() obj.__class__ = self.__class__ # Copy references to everything. obj.__dict__ = self.__dict__.copy() # Clone attributes that can't use shallow copy. obj.alias_refcount = self.alias_refcount.copy() obj.alias_map = self.alias_map.copy() obj.external_aliases = self.external_aliases.copy() obj.table_map = self.table_map.copy() obj.where = self.where.clone() obj.annotations = self.annotations.copy() if self.annotation_select_mask is None: obj.annotation_select_mask = None else: obj.annotation_select_mask = self.annotation_select_mask.copy() # _annotation_select_cache cannot be copied, as doing so breaks the # (necessary) state in which both annotations and # _annotation_select_cache point to the same underlying objects. # It will get re-populated in the cloned queryset the next time it's # used. obj._annotation_select_cache = None obj.extra = self.extra.copy() if self.extra_select_mask is None: obj.extra_select_mask = None else: obj.extra_select_mask = self.extra_select_mask.copy() if self._extra_select_cache is None: obj._extra_select_cache = None else: obj._extra_select_cache = self._extra_select_cache.copy() if 'subq_aliases' in self.__dict__: obj.subq_aliases = self.subq_aliases.copy() obj.used_aliases = self.used_aliases.copy() obj._filtered_relations = self._filtered_relations.copy() # Clear the cached_property try: del obj.base_table except AttributeError: pass return obj def chain(self, klass=None): """ Return a copy of the current Query that's ready for another operation. 
The klass argument changes the type of the Query, e.g. UpdateQuery. """ obj = self.clone() if klass and obj.__class__ != klass: obj.__class__ = klass if not obj.filter_is_sticky: obj.used_aliases = set() obj.filter_is_sticky = False if hasattr(obj, '_setup_query'): obj._setup_query() return obj def relabeled_clone(self, change_map): clone = self.clone() clone.change_aliases(change_map) return clone def rewrite_cols(self, annotation, col_cnt): # We must make sure the inner query has the referred columns in it. # If we are aggregating over an annotation, then Django uses Ref() # instances to note this. However, if we are annotating over a column # of a related model, then it might be that column isn't part of the # SELECT clause of the inner query, and we must manually make sure # the column is selected. An example case is: # .aggregate(Sum('author__awards')) # Resolving this expression results in a join to author, but there # is no guarantee the awards column of author is in the select clause # of the query. Thus we must manually add the column to the inner # query. orig_exprs = annotation.get_source_expressions() new_exprs = [] for expr in orig_exprs: # FIXME: These conditions are fairly arbitrary. Identify a better # method of having expressions decide which code path they should # take. if isinstance(expr, Ref): # Its already a Ref to subquery (see resolve_ref() for # details) new_exprs.append(expr) elif isinstance(expr, (WhereNode, Lookup)): # Decompose the subexpressions further. The code here is # copied from the else clause, but this condition must appear # before the contains_aggregate/is_summary condition below. new_expr, col_cnt = self.rewrite_cols(expr, col_cnt) new_exprs.append(new_expr) else: # Reuse aliases of expressions already selected in subquery. for col_alias, selected_annotation in self.annotation_select.items(): if selected_annotation == expr: new_expr = Ref(col_alias, expr) break else: # An expression that is not selected the subquery. if isinstance(expr, Col) or (expr.contains_aggregate and not expr.is_summary): # Reference column or another aggregate. Select it # under a non-conflicting alias. col_cnt += 1 col_alias = '__col%d' % col_cnt self.annotations[col_alias] = expr self.append_annotation_mask([col_alias]) new_expr = Ref(col_alias, expr) else: # Some other expression not referencing database values # directly. Its subexpression might contain Cols. new_expr, col_cnt = self.rewrite_cols(expr, col_cnt) new_exprs.append(new_expr) annotation.set_source_expressions(new_exprs) return annotation, col_cnt def get_aggregation(self, using, added_aggregate_names): """ Return the dictionary with the values of the existing aggregations. """ if not self.annotation_select: return {} has_limit = self.low_mark != 0 or self.high_mark is not None existing_annotations = [ annotation for alias, annotation in self.annotations.items() if alias not in added_aggregate_names ] # Decide if we need to use a subquery. # # Existing annotations would cause incorrect results as get_aggregation() # must produce just one result and thus must not use GROUP BY. But we # aren't smart enough to remove the existing annotations from the # query, so those would force us to use GROUP BY. # # If the query has limit or distinct, or uses set operations, then # those operations must be done in a subquery so that the query # aggregates on the limit and/or distinct results instead of applying # the distinct and limit after the aggregation. 
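        # For example, Model.objects.all()[:5].count() carries a limit, so
        # the count must be taken over a wrapped subquery, roughly:
        #     SELECT COUNT(*) FROM (SELECT ... LIMIT 5) subquery
        # whereas a plain Model.objects.count() aggregates directly.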
if (isinstance(self.group_by, tuple) or has_limit or existing_annotations or self.distinct or self.combinator): from django.db.models.sql.subqueries import AggregateQuery outer_query = AggregateQuery(self.model) inner_query = self.clone() inner_query.select_for_update = False inner_query.select_related = False inner_query.set_annotation_mask(self.annotation_select) if not has_limit and not self.distinct_fields: # Queries with distinct_fields need ordering and when a limit # is applied we must take the slice from the ordered query. # Otherwise no need for ordering. inner_query.clear_ordering(True) if not inner_query.distinct: # If the inner query uses default select and it has some # aggregate annotations, then we must make sure the inner # query is grouped by the main model's primary key. However, # clearing the select clause can alter results if distinct is # used. has_existing_aggregate_annotations = any( annotation for annotation in existing_annotations if getattr(annotation, 'contains_aggregate', True) ) if inner_query.default_cols and has_existing_aggregate_annotations: inner_query.group_by = (self.model._meta.pk.get_col(inner_query.get_initial_alias()),) inner_query.default_cols = False relabels = {t: 'subquery' for t in inner_query.alias_map} relabels[None] = 'subquery' # Remove any aggregates marked for reduction from the subquery # and move them to the outer AggregateQuery. col_cnt = 0 for alias, expression in list(inner_query.annotation_select.items()): annotation_select_mask = inner_query.annotation_select_mask if expression.is_summary: expression, col_cnt = inner_query.rewrite_cols(expression, col_cnt) outer_query.annotations[alias] = expression.relabeled_clone(relabels) del inner_query.annotations[alias] annotation_select_mask.remove(alias) # Make sure the annotation_select wont use cached results. inner_query.set_annotation_mask(inner_query.annotation_select_mask) if inner_query.select == () and not inner_query.default_cols and not inner_query.annotation_select_mask: # In case of Model.objects[0:3].count(), there would be no # field selected in the inner query, yet we must use a subquery. # So, make sure at least one field is selected. inner_query.select = (self.model._meta.pk.get_col(inner_query.get_initial_alias()),) try: outer_query.add_subquery(inner_query, using) except EmptyResultSet: return { alias: None for alias in outer_query.annotation_select } else: outer_query = self self.select = () self.default_cols = False self.extra = {} outer_query.clear_ordering(True) outer_query.clear_limits() outer_query.select_for_update = False outer_query.select_related = False compiler = outer_query.get_compiler(using) result = compiler.execute_sql(SINGLE) if result is None: result = [None] * len(outer_query.annotation_select) converters = compiler.get_converters(outer_query.annotation_select.values()) result = next(compiler.apply_converters((result,), converters)) return dict(zip(outer_query.annotation_select, result)) def get_count(self, using): """ Perform a COUNT() query using the current filter constraints. 
""" obj = self.clone() obj.add_annotation(Count('*'), alias='__count', is_summary=True) number = obj.get_aggregation(using, ['__count'])['__count'] if number is None: number = 0 return number def has_filters(self): return self.where def has_results(self, using): q = self.clone() if not q.distinct: if q.group_by is True: q.add_fields((f.attname for f in self.model._meta.concrete_fields), False) q.set_group_by() q.clear_select_clause() q.clear_ordering(True) q.set_limits(high=1) compiler = q.get_compiler(using=using) return compiler.has_results() def explain(self, using, format=None, **options): q = self.clone() q.explain_query = True q.explain_format = format q.explain_options = options compiler = q.get_compiler(using=using) return '\n'.join(compiler.explain_query()) def combine(self, rhs, connector): """ Merge the 'rhs' query into the current one (with any 'rhs' effects being applied *after* (that is, "to the right of") anything in the current query. 'rhs' is not modified during a call to this function. The 'connector' parameter describes how to connect filters from the 'rhs' query. """ assert self.model == rhs.model, \ "Cannot combine queries on two different base models." assert self.can_filter(), \ "Cannot combine queries once a slice has been taken." assert self.distinct == rhs.distinct, \ "Cannot combine a unique query with a non-unique query." assert self.distinct_fields == rhs.distinct_fields, \ "Cannot combine queries with different distinct fields." # Work out how to relabel the rhs aliases, if necessary. change_map = {} conjunction = (connector == AND) # Determine which existing joins can be reused. When combining the # query with AND we must recreate all joins for m2m filters. When # combining with OR we can reuse joins. The reason is that in AND # case a single row can't fulfill a condition like: # revrel__col=1 & revrel__col=2 # But, there might be two different related rows matching this # condition. In OR case a single True is enough, so single row is # enough, too. # # Note that we will be creating duplicate joins for non-m2m joins in # the AND case. The results will be correct but this creates too many # joins. This is something that could be fixed later on. reuse = set() if conjunction else set(self.alias_map) # Base table must be present in the query - this is the same # table on both sides. self.get_initial_alias() joinpromoter = JoinPromoter(connector, 2, False) joinpromoter.add_votes( j for j in self.alias_map if self.alias_map[j].join_type == INNER) rhs_votes = set() # Now, add the joins from rhs query into the new query (skipping base # table). rhs_tables = list(rhs.alias_map)[1:] for alias in rhs_tables: join = rhs.alias_map[alias] # If the left side of the join was already relabeled, use the # updated alias. join = join.relabeled_clone(change_map) new_alias = self.join(join, reuse=reuse) if join.join_type == INNER: rhs_votes.add(new_alias) # We can't reuse the same join again in the query. If we have two # distinct joins for the same connection in rhs query, then the # combined query must have two joins, too. reuse.discard(new_alias) if alias != new_alias: change_map[alias] = new_alias if not rhs.alias_refcount[alias]: # The alias was unused in the rhs query. Unref it so that it # will be unused in the new query, too. We have to add and # unref the alias so that join promotion has information of # the join type for the unused alias. 
                self.unref_alias(new_alias)
        joinpromoter.add_votes(rhs_votes)
        joinpromoter.update_join_types(self)

        # Now relabel a copy of the rhs where-clause and add it to the current
        # one.
        w = rhs.where.clone()
        w.relabel_aliases(change_map)
        self.where.add(w, connector)

        # Selection columns and extra extensions are those provided by 'rhs'.
        if rhs.select:
            self.set_select([col.relabeled_clone(change_map) for col in rhs.select])
        else:
            self.select = ()

        if connector == OR:
            # It would be nice to be able to handle this, but the queries don't
            # really make sense (or return consistent value sets). Not worth
            # the extra complexity when you can write a real query instead.
            if self.extra and rhs.extra:
                raise ValueError("When merging querysets using 'or', you cannot have extra(select=...) on both sides.")
        self.extra.update(rhs.extra)
        extra_select_mask = set()
        if self.extra_select_mask is not None:
            extra_select_mask.update(self.extra_select_mask)
        if rhs.extra_select_mask is not None:
            extra_select_mask.update(rhs.extra_select_mask)
        if extra_select_mask:
            self.set_extra_mask(extra_select_mask)
        self.extra_tables += rhs.extra_tables

        # Ordering uses the 'rhs' ordering, unless it has none, in which case
        # the current ordering is used.
        self.order_by = rhs.order_by or self.order_by
        self.extra_order_by = rhs.extra_order_by or self.extra_order_by

    def deferred_to_data(self, target, callback):
        """
        Convert the self.deferred_loading data structure to an alternate data
        structure, describing the fields that *will* be loaded. This is used
        to compute the columns to select from the database and also by the
        QuerySet class to work out which fields are being initialized on each
        model. Models that have all their fields included aren't mentioned in
        the result, only those that have field restrictions in place.

        The "target" parameter is the instance that is populated (in place).
        The "callback" is a function that is called whenever a (model, field)
        pair needs to be added to "target". It accepts three parameters:
        "target", and the model and list of fields being added for that model.
        """
        field_names, defer = self.deferred_loading
        if not field_names:
            return
        orig_opts = self.get_meta()
        seen = {}
        must_include = {orig_opts.concrete_model: {orig_opts.pk}}
        for field_name in field_names:
            parts = field_name.split(LOOKUP_SEP)
            cur_model = self.model._meta.concrete_model
            opts = orig_opts
            for name in parts[:-1]:
                old_model = cur_model
                if name in self._filtered_relations:
                    name = self._filtered_relations[name].relation_name
                source = opts.get_field(name)
                if is_reverse_o2o(source):
                    cur_model = source.related_model
                else:
                    cur_model = source.remote_field.model
                opts = cur_model._meta
                # Even if we're "just passing through" this model, we must add
                # both the current model's pk and the related reference field
                # (if it's not a reverse relation) to the things we select.
                if not is_reverse_o2o(source):
                    must_include[old_model].add(source)
                add_to_dict(must_include, cur_model, opts.pk)
            field = opts.get_field(parts[-1])
            is_reverse_object = field.auto_created and not field.concrete
            model = field.related_model if is_reverse_object else field.model
            model = model._meta.concrete_model
            if model == opts.model:
                model = cur_model
            if not is_reverse_o2o(field):
                add_to_dict(seen, model, field)

        if defer:
            # We need to load all fields for each model, except those that
            # appear in "seen" (for all models that appear in "seen"). The
            # only slight complexity here is handling fields that exist on
            # parent models.
            workset = {}
            for model, values in seen.items():
                for field in model._meta.local_fields:
                    if field not in values:
                        m = field.model._meta.concrete_model
                        add_to_dict(workset, m, field)
            for model, values in must_include.items():
                # If we haven't included a model in workset, we don't add the
                # corresponding must_include fields for that model, since an
                # empty set means "include all fields". That's why there's no
                # "else" branch here.
                if model in workset:
                    workset[model].update(values)
            for model, values in workset.items():
                callback(target, model, values)
        else:
            for model, values in must_include.items():
                if model in seen:
                    seen[model].update(values)
                else:
                    # As we've passed through this model, but not explicitly
                    # included any fields, we have to make sure it's mentioned
                    # so that only the "must include" fields are pulled in.
                    seen[model] = values
            # Now ensure that every model in the inheritance chain is mentioned
            # in the parent list. Again, it must be mentioned to ensure that
            # only "must include" fields are pulled in.
            for model in orig_opts.get_parent_list():
                seen.setdefault(model, set())
            for model, values in seen.items():
                callback(target, model, values)

    def table_alias(self, table_name, create=False, filtered_relation=None):
        """
        Return a table alias for the given table_name and whether this is a
        new alias or not.

        If 'create' is true, a new alias is always created. Otherwise, the
        most recently created alias for the table (if one exists) is reused.
        """
        alias_list = self.table_map.get(table_name)
        if not create and alias_list:
            alias = alias_list[0]
            self.alias_refcount[alias] += 1
            return alias, False

        # Create a new alias for this table.
        if alias_list:
            alias = '%s%d' % (self.alias_prefix, len(self.alias_map) + 1)
            alias_list.append(alias)
        else:
            # The first occurrence of a table uses the table name directly.
            alias = filtered_relation.alias if filtered_relation is not None else table_name
            self.table_map[table_name] = [alias]
        self.alias_refcount[alias] = 1
        return alias, True

    def ref_alias(self, alias):
        """Increases the reference count for this alias."""
        self.alias_refcount[alias] += 1

    def unref_alias(self, alias, amount=1):
        """Decreases the reference count for this alias."""
        self.alias_refcount[alias] -= amount

    def promote_joins(self, aliases):
        """
        Promote recursively the join type of the given aliases and their
        children to an outer join. Only promote a join if it is nullable or
        the parent join is an outer join.

        The children promotion is done to avoid join chains that contain
        a LOUTER b INNER c. So, if we have currently a INNER b INNER c and
        a->b is promoted, then we must also promote b->c automatically, or
        otherwise the promotion of a->b doesn't actually change anything in
        the query results.
        """
        aliases = list(aliases)
        while aliases:
            alias = aliases.pop(0)
            if self.alias_map[alias].join_type is None:
                # This is the base table (first FROM entry) - this table
                # isn't really joined at all in the query, so we should not
                # alter its join type.
                continue
            # Only the first alias (skipped above) should have None join_type
            assert self.alias_map[alias].join_type is not None
            parent_alias = self.alias_map[alias].parent_alias
            parent_louter = parent_alias and self.alias_map[parent_alias].join_type == LOUTER
            already_louter = self.alias_map[alias].join_type == LOUTER
            if ((self.alias_map[alias].nullable or parent_louter) and
                    not already_louter):
                self.alias_map[alias] = self.alias_map[alias].promote()
                # Join type of 'alias' changed, so re-examine all aliases that
                # refer to this one.
                aliases.extend(
                    join for join in self.alias_map
                    if self.alias_map[join].parent_alias == alias and join not in aliases
                )

    def demote_joins(self, aliases):
        """
        Change join type from LOUTER to INNER for all joins in aliases.

        Similarly to promote_joins(), this method must ensure no join chains
        containing first an outer, then an inner join are generated. If we
        are demoting b->c join in chain a LOUTER b LOUTER c then we must
        demote a->b automatically, or otherwise the demotion of b->c doesn't
        actually change anything in the query results.
        """
        aliases = list(aliases)
        while aliases:
            alias = aliases.pop(0)
            if self.alias_map[alias].join_type == LOUTER:
                self.alias_map[alias] = self.alias_map[alias].demote()
                parent_alias = self.alias_map[alias].parent_alias
                if self.alias_map[parent_alias].join_type == INNER:
                    aliases.append(parent_alias)

    def reset_refcounts(self, to_counts):
        """
        Reset reference counts for aliases so that they match the value passed
        in `to_counts`.
        """
        for alias, cur_refcount in self.alias_refcount.copy().items():
            unref_amount = cur_refcount - to_counts.get(alias, 0)
            self.unref_alias(alias, unref_amount)

    def change_aliases(self, change_map):
        """
        Change the aliases in change_map (which maps old-alias -> new-alias),
        relabelling any references to them in select columns and the where
        clause.
        """
        assert set(change_map).isdisjoint(change_map.values())

        # 1. Update references in "select" (normal columns plus aliases),
        # "group by" and "where".
        self.where.relabel_aliases(change_map)
        if isinstance(self.group_by, tuple):
            self.group_by = tuple([col.relabeled_clone(change_map) for col in self.group_by])
        self.select = tuple([col.relabeled_clone(change_map) for col in self.select])
        self.annotations = self.annotations and {
            key: col.relabeled_clone(change_map) for key, col in self.annotations.items()
        }

        # 2. Rename the alias in the internal table/alias datastructures.
        for old_alias, new_alias in change_map.items():
            if old_alias not in self.alias_map:
                continue
            alias_data = self.alias_map[old_alias].relabeled_clone(change_map)
            self.alias_map[new_alias] = alias_data
            self.alias_refcount[new_alias] = self.alias_refcount[old_alias]
            del self.alias_refcount[old_alias]
            del self.alias_map[old_alias]

            table_aliases = self.table_map[alias_data.table_name]
            for pos, alias in enumerate(table_aliases):
                if alias == old_alias:
                    table_aliases[pos] = new_alias
                    break
        self.external_aliases = {change_map.get(alias, alias)
                                 for alias in self.external_aliases}

    def bump_prefix(self, outer_query):
        """
        Change the alias prefix to the next letter in the alphabet in a way
        that the outer query's aliases and this query's aliases will not
        conflict. Even tables that previously had no alias will get an alias
        after this call.
        """
        def prefix_gen():
            """
            Generate a sequence of characters in alphabetical order:
                -> 'A', 'B', 'C', ...

            When the alphabet is finished, the sequence will continue with the
            Cartesian product:
                -> 'AA', 'AB', 'AC', ...
            """
            alphabet = ascii_uppercase
            prefix = chr(ord(self.alias_prefix) + 1)
            yield prefix
            for n in count(1):
                seq = alphabet[alphabet.index(prefix):] if prefix else alphabet
                for s in product(seq, repeat=n):
                    yield ''.join(s)
                prefix = None

        if self.alias_prefix != outer_query.alias_prefix:
            # No clashes between self and outer query should be possible.
            return

        # Explicitly avoid infinite loop. The constant divider is based on how
        # much depth recursive subquery references add to the stack.
This value # might need to be adjusted when adding or removing function calls from # the code path in charge of performing these operations. local_recursion_limit = sys.getrecursionlimit() // 16 for pos, prefix in enumerate(prefix_gen()): if prefix not in self.subq_aliases: self.alias_prefix = prefix break if pos > local_recursion_limit: raise RecursionError( 'Maximum recursion depth exceeded: too many subqueries.' ) self.subq_aliases = self.subq_aliases.union([self.alias_prefix]) outer_query.subq_aliases = outer_query.subq_aliases.union(self.subq_aliases) self.change_aliases({ alias: '%s%d' % (self.alias_prefix, pos) for pos, alias in enumerate(self.alias_map) }) def get_initial_alias(self): """ Return the first alias for this query, after increasing its reference count. """ if self.alias_map: alias = self.base_table self.ref_alias(alias) else: alias = self.join(BaseTable(self.get_meta().db_table, None)) return alias def count_active_tables(self): """ Return the number of tables in this query with a non-zero reference count. After execution, the reference counts are zeroed, so tables added in compiler will not be seen by this method. """ return len([1 for count in self.alias_refcount.values() if count]) def join(self, join, reuse=None, reuse_with_filtered_relation=False): """ Return an alias for the 'join', either reusing an existing alias for that join or creating a new one. 'join' is either a sql.datastructures.BaseTable or Join. The 'reuse' parameter can be either None which means all joins are reusable, or it can be a set containing the aliases that can be reused. The 'reuse_with_filtered_relation' parameter is used when computing FilteredRelation instances. A join is always created as LOUTER if the lhs alias is LOUTER to make sure chains like t1 LOUTER t2 INNER t3 aren't generated. All new joins are created as LOUTER if the join is nullable. """ if reuse_with_filtered_relation and reuse: reuse_aliases = [ a for a, j in self.alias_map.items() if a in reuse and j.equals(join, with_filtered_relation=False) ] else: reuse_aliases = [ a for a, j in self.alias_map.items() if (reuse is None or a in reuse) and j == join ] if reuse_aliases: if join.table_alias in reuse_aliases: reuse_alias = join.table_alias else: # Reuse the most recent alias of the joined table # (a many-to-many relation may be joined multiple times). reuse_alias = reuse_aliases[-1] self.ref_alias(reuse_alias) return reuse_alias # No reuse is possible, so we need a new alias. alias, _ = self.table_alias(join.table_name, create=True, filtered_relation=join.filtered_relation) if join.join_type: if self.alias_map[join.parent_alias].join_type == LOUTER or join.nullable: join_type = LOUTER else: join_type = INNER join.join_type = join_type join.table_alias = alias self.alias_map[alias] = join return alias def join_parent_model(self, opts, model, alias, seen): """ Make sure the given 'model' is joined in the query. If 'model' isn't a parent of 'opts' or if it is None this method is a no-op. The 'alias' is the root alias for starting the join, 'seen' is a dict of model -> alias of existing joins. It must also contain a mapping of None -> some alias. This will be returned in the no-op case. 
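
        For example, with hypothetical multi-table inheritance such as
        Restaurant(Place), calling this with Restaurant's options and
        model=Place ensures the restaurant -> place parent link is joined
        before fields inherited from Place are referenced.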
""" if model in seen: return seen[model] chain = opts.get_base_chain(model) if not chain: return alias curr_opts = opts for int_model in chain: if int_model in seen: curr_opts = int_model._meta alias = seen[int_model] continue # Proxy model have elements in base chain # with no parents, assign the new options # object and skip to the next base in that # case if not curr_opts.parents[int_model]: curr_opts = int_model._meta continue link_field = curr_opts.get_ancestor_link(int_model) join_info = self.setup_joins([link_field.name], curr_opts, alias) curr_opts = int_model._meta alias = seen[int_model] = join_info.joins[-1] return alias or seen[None] def add_annotation(self, annotation, alias, is_summary=False): """Add a single annotation expression to the Query.""" annotation = annotation.resolve_expression(self, allow_joins=True, reuse=None, summarize=is_summary) self.append_annotation_mask([alias]) self.annotations[alias] = annotation def resolve_expression(self, query, *args, **kwargs): clone = self.clone() # Subqueries need to use a different set of aliases than the outer query. clone.bump_prefix(query) clone.subquery = True # It's safe to drop ordering if the queryset isn't using slicing, # distinct(*fields) or select_for_update(). if (self.low_mark == 0 and self.high_mark is None and not self.distinct_fields and not self.select_for_update): clone.clear_ordering(True) clone.where.resolve_expression(query, *args, **kwargs) for key, value in clone.annotations.items(): resolved = value.resolve_expression(query, *args, **kwargs) if hasattr(resolved, 'external_aliases'): resolved.external_aliases.update(clone.alias_map) clone.annotations[key] = resolved # Outer query's aliases are considered external. clone.external_aliases.update( alias for alias, table in query.alias_map.items() if ( isinstance(table, Join) and table.join_field.related_model._meta.db_table != alias ) or ( isinstance(table, BaseTable) and table.table_name != table.table_alias ) ) return clone def as_sql(self, compiler, connection): sql, params = self.get_compiler(connection=connection).as_sql() if self.subquery: sql = '(%s)' % sql return sql, params def resolve_lookup_value(self, value, can_reuse, allow_joins, simple_col): if hasattr(value, 'resolve_expression'): kwargs = {'reuse': can_reuse, 'allow_joins': allow_joins} if isinstance(value, F): kwargs['simple_col'] = simple_col value = value.resolve_expression(self, **kwargs) elif isinstance(value, (list, tuple)): # The items of the iterable may be expressions and therefore need # to be resolved independently. for sub_value in value: if hasattr(sub_value, 'resolve_expression'): if isinstance(sub_value, F): sub_value.resolve_expression( self, reuse=can_reuse, allow_joins=allow_joins, simple_col=simple_col, ) else: sub_value.resolve_expression(self, reuse=can_reuse, allow_joins=allow_joins) return value def solve_lookup_type(self, lookup): """ Solve the lookup type from the lookup (e.g.: 'foobar__id__icontains'). """ lookup_splitted = lookup.split(LOOKUP_SEP) if self.annotations: expression, expression_lookups = refs_expression(lookup_splitted, self.annotations) if expression: return expression_lookups, (), expression _, field, _, lookup_parts = self.names_to_path(lookup_splitted, self.get_meta()) field_parts = lookup_splitted[0:len(lookup_splitted) - len(lookup_parts)] if len(lookup_parts) > 1 and not field_parts: raise FieldError( 'Invalid lookup "%s" for model %s".' 
                % (lookup, self.get_meta().model.__name__)
            )
        return lookup_parts, field_parts, False

    def check_query_object_type(self, value, opts, field):
        """
        Check whether the object passed while querying is of the correct type.
        If not, raise a ValueError specifying the wrong object.
        """
        if hasattr(value, '_meta'):
            if not check_rel_lookup_compatibility(value._meta.model, opts, field):
                raise ValueError(
                    'Cannot query "%s": Must be "%s" instance.' %
                    (value, opts.object_name))

    def check_related_objects(self, field, value, opts):
        """Check the type of object passed to query relations."""
        if field.is_relation:
            # Check that the field and the queryset use the same model in a
            # query like .filter(author=Author.objects.all()). For example, the
            # opts would be Author's (from the author field) and value.model
            # would be Author.objects.all() queryset's .model (Author also).
            # The field is the related field on the lhs side.
            if (isinstance(value, Query) and not value.has_select_fields and
                    not check_rel_lookup_compatibility(value.model, opts, field)):
                raise ValueError(
                    'Cannot use QuerySet for "%s": Use a QuerySet for "%s".' %
                    (value.model._meta.object_name, opts.object_name)
                )
            elif hasattr(value, '_meta'):
                self.check_query_object_type(value, opts, field)
            elif hasattr(value, '__iter__'):
                for v in value:
                    self.check_query_object_type(v, opts, field)

    def build_lookup(self, lookups, lhs, rhs):
        """
        Try to extract transforms and a lookup from the given lhs.

        The lhs value is something that works like SQLExpression.
        The rhs value is what the lookup is going to compare against.
        The 'lookups' argument is a list of names to extract using get_lookup()
        and get_transform().
        """
        # __exact is the default lookup if one isn't given.
        *transforms, lookup_name = lookups or ['exact']
        for name in transforms:
            lhs = self.try_transform(lhs, name)
        # First try get_lookup() so that the lookup takes precedence if the lhs
        # supports both transform and lookup for the name.
        lookup_class = lhs.get_lookup(lookup_name)
        if not lookup_class:
            if lhs.field.is_relation:
                raise FieldError('Related Field got invalid lookup: {}'.format(lookup_name))
            # A lookup wasn't found. Try to interpret the name as a transform
            # and do an Exact lookup against it.
            lhs = self.try_transform(lhs, lookup_name)
            lookup_name = 'exact'
            lookup_class = lhs.get_lookup(lookup_name)
            if not lookup_class:
                return

        lookup = lookup_class(lhs, rhs)
        # Interpret '__exact=None' as the sql 'is NULL'; otherwise, reject all
        # uses of None as a query value unless the lookup supports it.
        if lookup.rhs is None and not lookup.can_use_none_as_rhs:
            if lookup_name not in ('exact', 'iexact'):
                raise ValueError("Cannot use None as a query value")
            return lhs.get_lookup('isnull')(lhs, True)

        # For Oracle '' is equivalent to null. The check must be done at this
        # stage because join promotion can't be done in the compiler. Using
        # DEFAULT_DB_ALIAS isn't nice but it's the best that can be done here.
        # A similar thing is done in is_nullable(), too.
        if (connections[DEFAULT_DB_ALIAS].features.interprets_empty_strings_as_nulls and
                lookup_name == 'exact' and lookup.rhs == ''):
            return lhs.get_lookup('isnull')(lhs, True)

        return lookup

    def try_transform(self, lhs, name):
        """
        Helper method for build_lookup(). Try to fetch and initialize
        a transform for the name parameter from lhs.
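
        For example, if lhs is a column backed by a DateField, a hypothetical
        try_transform(lhs, 'year') returns the registered 'year' transform
        (an ExtractYear-style expression) wrapping that column; an unknown
        name raises FieldError with close-match suggestions.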
""" transform_class = lhs.get_transform(name) if transform_class: return transform_class(lhs) else: output_field = lhs.output_field.__class__ suggested_lookups = difflib.get_close_matches(name, output_field.get_lookups()) if suggested_lookups: suggestion = ', perhaps you meant %s?' % ' or '.join(suggested_lookups) else: suggestion = '.' raise FieldError( "Unsupported lookup '%s' for %s or join on the field not " "permitted%s" % (name, output_field.__name__, suggestion) ) def build_filter(self, filter_expr, branch_negated=False, current_negated=False, can_reuse=None, allow_joins=True, split_subq=True, reuse_with_filtered_relation=False, simple_col=False): """ Build a WhereNode for a single filter clause but don't add it to this Query. Query.add_q() will then add this filter to the where Node. The 'branch_negated' tells us if the current branch contains any negations. This will be used to determine if subqueries are needed. The 'current_negated' is used to determine if the current filter is negated or not and this will be used to determine if IS NULL filtering is needed. The difference between current_negated and branch_negated is that branch_negated is set on first negation, but current_negated is flipped for each negation. Note that add_filter will not do any negating itself, that is done upper in the code by add_q(). The 'can_reuse' is a set of reusable joins for multijoins. If 'reuse_with_filtered_relation' is True, then only joins in can_reuse will be reused. The method will create a filter clause that can be added to the current query. However, if the filter isn't added to the query then the caller is responsible for unreffing the joins used. """ if isinstance(filter_expr, dict): raise FieldError("Cannot parse keyword query as dict") arg, value = filter_expr if not arg: raise FieldError("Cannot parse keyword query %r" % arg) lookups, parts, reffed_expression = self.solve_lookup_type(arg) if not getattr(reffed_expression, 'filterable', True): raise NotSupportedError( reffed_expression.__class__.__name__ + ' is disallowed in ' 'the filter clause.' ) if not allow_joins and len(parts) > 1: raise FieldError("Joined field references are not permitted in this query") pre_joins = self.alias_refcount.copy() value = self.resolve_lookup_value(value, can_reuse, allow_joins, simple_col) used_joins = {k for k, v in self.alias_refcount.items() if v > pre_joins.get(k, 0)} clause = self.where_class() if reffed_expression: condition = self.build_lookup(lookups, reffed_expression, value) clause.add(condition, AND) return clause, [] opts = self.get_meta() alias = self.get_initial_alias() allow_many = not branch_negated or not split_subq try: join_info = self.setup_joins( parts, opts, alias, can_reuse=can_reuse, allow_many=allow_many, reuse_with_filtered_relation=reuse_with_filtered_relation, ) # Prevent iterator from being consumed by check_related_objects() if isinstance(value, Iterator): value = list(value) self.check_related_objects(join_info.final_field, value, join_info.opts) # split_exclude() needs to know which joins were generated for the # lookup parts self._lookup_joins = join_info.joins except MultiJoin as e: return self.split_exclude(filter_expr, can_reuse, e.names_with_path) # Update used_joins before trimming since they are reused to determine # which joins could be later promoted to INNER. 
        used_joins.update(join_info.joins)
        targets, alias, join_list = self.trim_joins(join_info.targets, join_info.joins, join_info.path)
        if can_reuse is not None:
            can_reuse.update(join_list)

        if join_info.final_field.is_relation:
            # No support for transforms for relational fields
            num_lookups = len(lookups)
            if num_lookups > 1:
                raise FieldError('Related Field got invalid lookup: {}'.format(lookups[0]))
            if len(targets) == 1:
                col = _get_col(targets[0], join_info.final_field, alias, simple_col)
            else:
                col = MultiColSource(alias, targets, join_info.targets, join_info.final_field)
        else:
            col = _get_col(targets[0], join_info.final_field, alias, simple_col)

        condition = self.build_lookup(lookups, col, value)
        lookup_type = condition.lookup_name
        clause.add(condition, AND)

        require_outer = lookup_type == 'isnull' and condition.rhs is True and not current_negated
        if current_negated and (lookup_type != 'isnull' or condition.rhs is False) and condition.rhs is not None:
            require_outer = True
            if (lookup_type != 'isnull' and (
                    self.is_nullable(targets[0]) or
                    self.alias_map[join_list[-1]].join_type == LOUTER)):
                # The condition added here will be SQL like this:
                # NOT (col IS NOT NULL), where the first NOT is added in
                # upper layers of code. The reason for addition is that if col
                # is null, then col != someval will result in SQL "unknown"
                # which isn't the same as in Python. The Python None handling
                # is wanted, and it can be gotten by
                # (col IS NULL OR col != someval)
                #   <=>
                # NOT (col IS NOT NULL AND col = someval).
                lookup_class = targets[0].get_lookup('isnull')
                col = _get_col(targets[0], join_info.targets[0], alias, simple_col)
                clause.add(lookup_class(col, False), AND)
        return clause, used_joins if not require_outer else ()

    def add_filter(self, filter_clause):
        self.add_q(Q(**{filter_clause[0]: filter_clause[1]}))

    def add_q(self, q_object):
        """
        A preprocessor for the internal _add_q(). Responsible for doing final
        join promotion.
        """
        # For join promotion this case is doing an AND for the added q_object
        # and existing conditions. So, any existing inner join forces the join
        # type to remain inner. Existing outer joins can however be demoted.
        # (Consider case where rel_a is LOUTER and rel_a__col=1 is added - if
        # rel_a doesn't produce any rows, then the whole condition must fail.
        # So, demotion is OK.)
        existing_inner = {a for a in self.alias_map if self.alias_map[a].join_type == INNER}
        clause, _ = self._add_q(q_object, self.used_aliases)
        if clause:
            self.where.add(clause, AND)
        self.demote_joins(existing_inner)

    def build_where(self, q_object):
        return self._add_q(q_object, used_aliases=set(), allow_joins=False, simple_col=True)[0]

    def _add_q(self, q_object, used_aliases, branch_negated=False,
               current_negated=False, allow_joins=True, split_subq=True,
               simple_col=False):
        """Add a Q-object to the current filter."""
        connector = q_object.connector
        current_negated = current_negated ^ q_object.negated
        branch_negated = branch_negated or q_object.negated
        target_clause = self.where_class(connector=connector,
                                         negated=q_object.negated)
        joinpromoter = JoinPromoter(q_object.connector, len(q_object.children), current_negated)
        for child in q_object.children:
            if isinstance(child, Node):
                child_clause, needed_inner = self._add_q(
                    child, used_aliases, branch_negated,
                    current_negated, allow_joins, split_subq, simple_col)
                joinpromoter.add_votes(needed_inner)
            else:
                child_clause, needed_inner = self.build_filter(
                    child, can_reuse=used_aliases, branch_negated=branch_negated,
                    current_negated=current_negated, allow_joins=allow_joins,
                    split_subq=split_subq, simple_col=simple_col,
                )
                joinpromoter.add_votes(needed_inner)
            if child_clause:
                target_clause.add(child_clause, connector)
        needed_inner = joinpromoter.update_join_types(self)
        return target_clause, needed_inner

    def build_filtered_relation_q(self, q_object, reuse, branch_negated=False,
                                  current_negated=False):
        """Add a FilteredRelation object to the current filter."""
        connector = q_object.connector
        current_negated ^= q_object.negated
        branch_negated = branch_negated or q_object.negated
        target_clause = self.where_class(connector=connector, negated=q_object.negated)
        for child in q_object.children:
            if isinstance(child, Node):
                child_clause = self.build_filtered_relation_q(
                    child, reuse=reuse, branch_negated=branch_negated,
                    current_negated=current_negated,
                )
            else:
                child_clause, _ = self.build_filter(
                    child, can_reuse=reuse, branch_negated=branch_negated,
                    current_negated=current_negated,
                    allow_joins=True, split_subq=False,
                    reuse_with_filtered_relation=True,
                )
            target_clause.add(child_clause, connector)
        return target_clause

    def add_filtered_relation(self, filtered_relation, alias):
        filtered_relation.alias = alias
        lookups = dict(get_children_from_q(filtered_relation.condition))
        for lookup in chain((filtered_relation.relation_name,), lookups):
            lookup_parts, field_parts, _ = self.solve_lookup_type(lookup)
            shift = 2 if not lookup_parts else 1
            if len(field_parts) > (shift + len(lookup_parts)):
                raise ValueError(
                    "FilteredRelation's condition doesn't support nested "
                    "relations (got %r)." % lookup
                )
        self._filtered_relations[filtered_relation.alias] = filtered_relation

    def names_to_path(self, names, opts, allow_many=True, fail_on_missing=False):
        """
        Walk the list of names and turn them into PathInfo tuples. A single
        name in 'names' can generate multiple PathInfos (m2m, for example).

        'names' is the path of names to travel, 'opts' is the model Options we
        start the name resolving from, 'allow_many' is as for setup_joins().
        If fail_on_missing is set to True, then a name that can't be resolved
        will generate a FieldError.

        Return a list of PathInfo tuples. In addition return the final field
        (the last used join field) and target (which is a field guaranteed to
        contain the same value as the final field).
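
        For example (a hypothetical spanning lookup), resolving
        ['book', 'author', 'name', 'icontains'] would produce PathInfos for
        the book -> author joins, with 'name' as the final field and
        'icontains' left over as an unresolved trailing name.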
Finally, return those names that weren't found (which are likely transforms and the final lookup). """ path, names_with_path = [], [] for pos, name in enumerate(names): cur_names_with_path = (name, []) if name == 'pk': name = opts.pk.name field = None filtered_relation = None try: field = opts.get_field(name) except FieldDoesNotExist: if name in self.annotation_select: field = self.annotation_select[name].output_field elif name in self._filtered_relations and pos == 0: filtered_relation = self._filtered_relations[name] field = opts.get_field(filtered_relation.relation_name) if field is not None: # Fields that contain one-to-many relations with a generic # model (like a GenericForeignKey) cannot generate reverse # relations and therefore cannot be used for reverse querying. if field.is_relation and not field.related_model: raise FieldError( "Field %r does not generate an automatic reverse " "relation and therefore cannot be used for reverse " "querying. If it is a GenericForeignKey, consider " "adding a GenericRelation." % name ) try: model = field.model._meta.concrete_model except AttributeError: # QuerySet.annotate() may introduce fields that aren't # attached to a model. model = None else: # We didn't find the current field, so move position back # one step. pos -= 1 if pos == -1 or fail_on_missing: available = sorted([ *get_field_names_from_opts(opts), *self.annotation_select, *self._filtered_relations, ]) raise FieldError("Cannot resolve keyword '%s' into field. " "Choices are: %s" % (name, ", ".join(available))) break # Check if we need any joins for concrete inheritance cases (the # field lives in parent, but we are currently in one of its # children) if model is not opts.model: path_to_parent = opts.get_path_to_parent(model) if path_to_parent: path.extend(path_to_parent) cur_names_with_path[1].extend(path_to_parent) opts = path_to_parent[-1].to_opts if hasattr(field, 'get_path_info'): pathinfos = field.get_path_info(filtered_relation) if not allow_many: for inner_pos, p in enumerate(pathinfos): if p.m2m: cur_names_with_path[1].extend(pathinfos[0:inner_pos + 1]) names_with_path.append(cur_names_with_path) raise MultiJoin(pos + 1, names_with_path) last = pathinfos[-1] path.extend(pathinfos) final_field = last.join_field opts = last.to_opts targets = last.target_fields cur_names_with_path[1].extend(pathinfos) names_with_path.append(cur_names_with_path) else: # Local non-relational field. final_field = field targets = (field,) if fail_on_missing and pos + 1 != len(names): raise FieldError( "Cannot resolve keyword %r into field. Join on '%s'" " not permitted." % (names[pos + 1], name)) break return path, final_field, targets, names[pos + 1:] def setup_joins(self, names, opts, alias, can_reuse=None, allow_many=True, reuse_with_filtered_relation=False): """ Compute the necessary table joins for the passage through the fields given in 'names'. 'opts' is the Options class for the current model (which gives the table we are starting from), 'alias' is the alias for the table to start the joining from. The 'can_reuse' defines the reverse foreign key joins we can reuse. It can be None in which case all joins are reusable or a set of aliases that can be reused. Note that non-reverse foreign keys are always reusable when using setup_joins(). The 'reuse_with_filtered_relation' can be used to force 'can_reuse' parameter and force the relation on the given connections. If 'allow_many' is False, then any reverse foreign key seen will generate a MultiJoin exception. 
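
        For example (hypothetical models), setup_joins(['author', 'name'],
        Book._meta, alias) adds the book -> author join and resolves 'name'
        against the joined table.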
        Return the final field involved in the joins, the target field (used
        for any 'where' constraint), the final 'opts' value, the joins, the
        field path traveled to generate the joins, and a transform function
        that takes a field and alias and is equivalent to
        `field.get_col(alias)` in the simple case but wraps field transforms
        if they were included in names.

        The target field is the field containing the concrete value. Final
        field can be something different, for example foreign key pointing to
        that value. Final field is needed for example in some value
        conversions (convert 'obj' in fk__id=obj to pk val using the foreign
        key field for example).
        """
        joins = [alias]
        # The transform can't be applied yet, as joins must be trimmed later.
        # To avoid making every caller of this method look up transforms
        # directly, compute transforms here and create a partial that converts
        # fields to the appropriate wrapped version.

        def final_transformer(field, alias):
            return field.get_col(alias)

        # Try resolving all the names as fields first. If there's an error,
        # treat trailing names as lookups until a field can be resolved.
        last_field_exception = None
        for pivot in range(len(names), 0, -1):
            try:
                path, final_field, targets, rest = self.names_to_path(
                    names[:pivot], opts, allow_many, fail_on_missing=True,
                )
            except FieldError as exc:
                if pivot == 1:
                    # The first item cannot be a lookup, so it's safe
                    # to raise the field error here.
                    raise
                else:
                    last_field_exception = exc
            else:
                # The transforms are the remaining items that couldn't be
                # resolved into fields.
                transforms = names[pivot:]
                break
        for name in transforms:
            def transform(field, alias, *, name, previous):
                try:
                    wrapped = previous(field, alias)
                    return self.try_transform(wrapped, name)
                except FieldError:
                    # FieldError is raised if the transform doesn't exist.
                    if isinstance(final_field, Field) and last_field_exception:
                        raise last_field_exception
                    else:
                        raise
            final_transformer = functools.partial(transform, name=name, previous=final_transformer)
        # Then, add the path to the query's joins. Note that we can't trim
        # joins at this stage - we will need the information about join type
        # of the trimmed joins.
        for join in path:
            if join.filtered_relation:
                filtered_relation = join.filtered_relation.clone()
                table_alias = filtered_relation.alias
            else:
                filtered_relation = None
                table_alias = None
            opts = join.to_opts
            if join.direct:
                nullable = self.is_nullable(join.join_field)
            else:
                nullable = True
            connection = Join(
                opts.db_table, alias, table_alias, INNER, join.join_field,
                nullable, filtered_relation=filtered_relation,
            )
            reuse = can_reuse if join.m2m or reuse_with_filtered_relation else None
            alias = self.join(
                connection, reuse=reuse,
                reuse_with_filtered_relation=reuse_with_filtered_relation,
            )
            joins.append(alias)
            if filtered_relation:
                filtered_relation.path = joins[:]
        return JoinInfo(final_field, targets, opts, joins, path, final_transformer)

    def trim_joins(self, targets, joins, path):
        """
        The 'targets' parameter is the final field(s) being joined to, 'joins'
        is the full list of join aliases. The 'path' contains the PathInfos
        used to create the joins.

        Return the final target field and table alias and the new active
        joins.

        Always trim any direct join if the target column is already in the
        previous table. Can't trim reverse joins as it's unknown if there's
        anything on the other side of the join.
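
        For example, a hypothetical fk__id filter first joins the related
        table, but since the id value is already stored in the fk column of
        the previous table, the join can be trimmed and the filter compared
        against the local column instead.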
""" joins = joins[:] for pos, info in enumerate(reversed(path)): if len(joins) == 1 or not info.direct: break if info.filtered_relation: break join_targets = {t.column for t in info.join_field.foreign_related_fields} cur_targets = {t.column for t in targets} if not cur_targets.issubset(join_targets): break targets_dict = {r[1].column: r[0] for r in info.join_field.related_fields if r[1].column in cur_targets} targets = tuple(targets_dict[t.column] for t in targets) self.unref_alias(joins.pop()) return targets, joins[-1], joins def resolve_ref(self, name, allow_joins=True, reuse=None, summarize=False, simple_col=False): if not allow_joins and LOOKUP_SEP in name: raise FieldError("Joined field references are not permitted in this query") if name in self.annotations: if summarize: # Summarize currently means we are doing an aggregate() query # which is executed as a wrapped subquery if any of the # aggregate() elements reference an existing annotation. In # that case we need to return a Ref to the subquery's annotation. return Ref(name, self.annotation_select[name]) else: return self.annotations[name] else: field_list = name.split(LOOKUP_SEP) join_info = self.setup_joins(field_list, self.get_meta(), self.get_initial_alias(), can_reuse=reuse) targets, final_alias, join_list = self.trim_joins(join_info.targets, join_info.joins, join_info.path) if not allow_joins and len(join_list) > 1: raise FieldError('Joined field references are not permitted in this query') if len(targets) > 1: raise FieldError("Referencing multicolumn fields with F() objects " "isn't supported") # Verify that the last lookup in name is a field or a transform: # transform_function() raises FieldError if not. join_info.transform_function(targets[0], final_alias) if reuse is not None: reuse.update(join_list) col = _get_col(targets[0], join_info.targets[0], join_list[-1], simple_col) return col def split_exclude(self, filter_expr, can_reuse, names_with_path): """ When doing an exclude against any kind of N-to-many relation, we need to use a subquery. This method constructs the nested query, given the original exclude filter (filter_expr) and the portion up to the first N-to-many relation field. For example, if the origin filter is ~Q(child__name='foo'), filter_expr is ('child__name', 'foo') and can_reuse is a set of joins usable for filters in the original query. We will turn this into equivalent of: WHERE NOT (pk IN (SELECT parent_id FROM thetable WHERE name = 'foo' AND parent_id IS NOT NULL)) It might be worth it to consider using WHERE NOT EXISTS as that has saner null handling, and is easier for the backend's optimizer to handle. """ filter_lhs, filter_rhs = filter_expr if isinstance(filter_rhs, F): filter_expr = (filter_lhs, OuterRef(filter_rhs.name)) # Generate the inner query. query = Query(self.model) query._filtered_relations = self._filtered_relations query.add_filter(filter_expr) query.clear_ordering(True) # Try to have as simple as possible subquery -> trim leading joins from # the subquery. trimmed_prefix, contains_louter = query.trim_start(names_with_path) # Add extra check to make sure the selected field will not be null # since we are adding an IN <subquery> clause. This prevents the # database from tripping over IN (...,NULL,...) 
selects and returning # nothing col = query.select[0] select_field = col.target alias = col.alias if self.is_nullable(select_field): lookup_class = select_field.get_lookup('isnull') lookup = lookup_class(select_field.get_col(alias), False) query.where.add(lookup, AND) if alias in can_reuse: pk = select_field.model._meta.pk # Need to add a restriction so that outer query's filters are in effect for # the subquery, too. query.bump_prefix(self) lookup_class = select_field.get_lookup('exact') # Note that the query.select[0].alias is different from alias # due to bump_prefix above. lookup = lookup_class(pk.get_col(query.select[0].alias), pk.get_col(alias)) query.where.add(lookup, AND) query.external_aliases.add(alias) condition, needed_inner = self.build_filter( ('%s__in' % trimmed_prefix, query), current_negated=True, branch_negated=True, can_reuse=can_reuse) if contains_louter: or_null_condition, _ = self.build_filter( ('%s__isnull' % trimmed_prefix, True), current_negated=True, branch_negated=True, can_reuse=can_reuse) condition.add(or_null_condition, OR) # Note that the end result will be: # (outercol NOT IN innerq AND outercol IS NOT NULL) OR outercol IS NULL. # This might look crazy but due to how IN works, this seems to be # correct. If the IS NOT NULL check is removed then outercol NOT # IN will return UNKNOWN. If the IS NULL check is removed, then if # outercol IS NULL we will not match the row. return condition, needed_inner def set_empty(self): self.where.add(NothingNode(), AND) def is_empty(self): return any(isinstance(c, NothingNode) for c in self.where.children) def set_limits(self, low=None, high=None): """ Adjust the limits on the rows retrieved. Use low/high to set these, as it makes it more Pythonic to read and write. When the SQL query is created, convert them to the appropriate offset and limit values. Apply any limits passed in here to the existing constraints. Add low to the current low value and clamp both to any existing high value. """ if high is not None: if self.high_mark is not None: self.high_mark = min(self.high_mark, self.low_mark + high) else: self.high_mark = self.low_mark + high if low is not None: if self.high_mark is not None: self.low_mark = min(self.high_mark, self.low_mark + low) else: self.low_mark = self.low_mark + low if self.low_mark == self.high_mark: self.set_empty() def clear_limits(self): """Clear any existing limits.""" self.low_mark, self.high_mark = 0, None def has_limit_one(self): return self.high_mark is not None and (self.high_mark - self.low_mark) == 1 def can_filter(self): """ Return True if adding filters to this instance is still possible. Typically, this means no limits or offsets have been put on the results. """ return not self.low_mark and self.high_mark is None def clear_select_clause(self): """Remove all fields from SELECT clause.""" self.select = () self.default_cols = False self.select_related = False self.set_extra_mask(()) self.set_annotation_mask(()) def clear_select_fields(self): """ Clear the list of fields to select (but not extra_select columns). Some queryset types completely replace any existing list of select columns. """ self.select = () self.values_select = () def add_select_col(self, col): self.select += col, self.values_select += col.output_field.name, def set_select(self, cols): self.default_cols = False self.select = tuple(cols) def add_distinct_fields(self, *field_names): """ Add and resolve the given fields to the query's "distinct on" clause. 
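
        For example, a call like add_distinct_fields('author', 'title')
        (normally reached through QuerySet.distinct('author', 'title')) makes
        the compiler emit SELECT DISTINCT ON for those columns on backends
        that support it (e.g. PostgreSQL).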
""" self.distinct_fields = field_names self.distinct = True def add_fields(self, field_names, allow_m2m=True): """ Add the given (model) fields to the select set. Add the field names in the order specified. """ alias = self.get_initial_alias() opts = self.get_meta() try: cols = [] for name in field_names: # Join promotion note - we must not remove any rows here, so # if there is no existing joins, use outer join. join_info = self.setup_joins(name.split(LOOKUP_SEP), opts, alias, allow_many=allow_m2m) targets, final_alias, joins = self.trim_joins( join_info.targets, join_info.joins, join_info.path, ) for target in targets: cols.append(join_info.transform_function(target, final_alias)) if cols: self.set_select(cols) except MultiJoin: raise FieldError("Invalid field name: '%s'" % name) except FieldError: if LOOKUP_SEP in name: # For lookups spanning over relationships, show the error # from the model on which the lookup failed. raise else: names = sorted([ *get_field_names_from_opts(opts), *self.extra, *self.annotation_select, *self._filtered_relations ]) raise FieldError("Cannot resolve keyword %r into field. " "Choices are: %s" % (name, ", ".join(names))) def add_ordering(self, *ordering): """ Add items from the 'ordering' sequence to the query's "order by" clause. These items are either field names (not column names) -- possibly with a direction prefix ('-' or '?') -- or OrderBy expressions. If 'ordering' is empty, clear all ordering from the query. """ errors = [] for item in ordering: if not hasattr(item, 'resolve_expression') and not ORDER_PATTERN.match(item): errors.append(item) if getattr(item, 'contains_aggregate', False): raise FieldError( 'Using an aggregate in order_by() without also including ' 'it in annotate() is not allowed: %s' % item ) if errors: raise FieldError('Invalid order_by arguments: %s' % errors) if ordering: self.order_by += ordering else: self.default_ordering = False def clear_ordering(self, force_empty): """ Remove any ordering settings. If 'force_empty' is True, there will be no ordering in the resulting query (not even the model's default). """ self.order_by = () self.extra_order_by = () if force_empty: self.default_ordering = False def set_group_by(self): """ Expand the GROUP BY clause required by the query. This will usually be the set of all non-aggregate fields in the return data. If the database backend supports grouping by the primary key, and the query would be equivalent, the optimization will be made automatically. """ group_by = list(self.select) if self.annotation_select: for alias, annotation in self.annotation_select.items(): try: inspect.getcallargs(annotation.get_group_by_cols, alias=alias) except TypeError: annotation_class = annotation.__class__ msg = ( '`alias=None` must be added to the signature of ' '%s.%s.get_group_by_cols().' ) % (annotation_class.__module__, annotation_class.__qualname__) warnings.warn(msg, category=RemovedInDjango40Warning) group_by_cols = annotation.get_group_by_cols() else: group_by_cols = annotation.get_group_by_cols(alias=alias) group_by.extend(group_by_cols) self.group_by = tuple(group_by) def add_select_related(self, fields): """ Set up the select_related data structure so that we only select certain related models (as opposed to all models, when self.select_related=True). 
""" if isinstance(self.select_related, bool): field_dict = {} else: field_dict = self.select_related for field in fields: d = field_dict for part in field.split(LOOKUP_SEP): d = d.setdefault(part, {}) self.select_related = field_dict def add_extra(self, select, select_params, where, params, tables, order_by): """ Add data to the various extra_* attributes for user-created additions to the query. """ if select: # We need to pair any placeholder markers in the 'select' # dictionary with their parameters in 'select_params' so that # subsequent updates to the select dictionary also adjust the # parameters appropriately. select_pairs = {} if select_params: param_iter = iter(select_params) else: param_iter = iter([]) for name, entry in select.items(): entry = str(entry) entry_params = [] pos = entry.find("%s") while pos != -1: if pos == 0 or entry[pos - 1] != '%': entry_params.append(next(param_iter)) pos = entry.find("%s", pos + 2) select_pairs[name] = (entry, entry_params) self.extra.update(select_pairs) if where or params: self.where.add(ExtraWhere(where, params), AND) if tables: self.extra_tables += tuple(tables) if order_by: self.extra_order_by = order_by def clear_deferred_loading(self): """Remove any fields from the deferred loading set.""" self.deferred_loading = (frozenset(), True) def add_deferred_loading(self, field_names): """ Add the given list of model field names to the set of fields to exclude from loading from the database when automatic column selection is done. Add the new field names to any existing field names that are deferred (or removed from any existing field names that are marked as the only ones for immediate loading). """ # Fields on related models are stored in the literal double-underscore # format, so that we can use a set datastructure. We do the foo__bar # splitting and handling when computing the SQL column names (as part of # get_columns()). existing, defer = self.deferred_loading if defer: # Add to existing deferred names. self.deferred_loading = existing.union(field_names), True else: # Remove names from the set of any existing "immediate load" names. self.deferred_loading = existing.difference(field_names), False def add_immediate_loading(self, field_names): """ Add the given list of model field names to the set of fields to retrieve when the SQL is executed ("immediate loading" fields). The field names replace any existing immediate loading field names. If there are field names already specified for deferred loading, remove those names from the new field_names before storing the new names for immediate loading. (That is, immediate loading overrides any existing immediate values, but respects existing deferrals.) """ existing, defer = self.deferred_loading field_names = set(field_names) if 'pk' in field_names: field_names.remove('pk') field_names.add(self.get_meta().pk.name) if defer: # Remove any existing deferred names from the current set before # setting the new names. self.deferred_loading = field_names.difference(existing), False else: # Replace any existing "immediate load" field names. self.deferred_loading = frozenset(field_names), False def get_loaded_field_names(self): """ If any fields are marked to be deferred, return a dictionary mapping models to a set of names in those fields that will be loaded. If a model is not in the returned dictionary, none of its fields are deferred. If no fields are marked for deferral, return an empty dictionary. 
""" # We cache this because we call this function multiple times # (compiler.fill_related_selections, query.iterator) try: return self._loaded_field_names_cache except AttributeError: collection = {} self.deferred_to_data(collection, self.get_loaded_field_names_cb) self._loaded_field_names_cache = collection return collection def get_loaded_field_names_cb(self, target, model, fields): """Callback used by get_deferred_field_names().""" target[model] = {f.attname for f in fields} def set_annotation_mask(self, names): """Set the mask of annotations that will be returned by the SELECT.""" if names is None: self.annotation_select_mask = None else: self.annotation_select_mask = set(names) self._annotation_select_cache = None def append_annotation_mask(self, names): if self.annotation_select_mask is not None: self.set_annotation_mask(self.annotation_select_mask.union(names)) def set_extra_mask(self, names): """ Set the mask of extra select items that will be returned by SELECT. Don't remove them from the Query since they might be used later. """ if names is None: self.extra_select_mask = None else: self.extra_select_mask = set(names) self._extra_select_cache = None def set_values(self, fields): self.select_related = False self.clear_deferred_loading() self.clear_select_fields() if self.group_by is True: self.add_fields((f.attname for f in self.model._meta.concrete_fields), False) self.set_group_by() self.clear_select_fields() if fields: field_names = [] extra_names = [] annotation_names = [] if not self.extra and not self.annotations: # Shortcut - if there are no extra or annotations, then # the values() clause must be just field names. field_names = list(fields) else: self.default_cols = False for f in fields: if f in self.extra_select: extra_names.append(f) elif f in self.annotation_select: annotation_names.append(f) else: field_names.append(f) self.set_extra_mask(extra_names) self.set_annotation_mask(annotation_names) else: field_names = [f.attname for f in self.model._meta.concrete_fields] self.values_select = tuple(field_names) self.add_fields(field_names, True) @property def annotation_select(self): """ Return the dictionary of aggregate columns that are not masked and should be used in the SELECT clause. Cache this result for performance. """ if self._annotation_select_cache is not None: return self._annotation_select_cache elif not self.annotations: return {} elif self.annotation_select_mask is not None: self._annotation_select_cache = { k: v for k, v in self.annotations.items() if k in self.annotation_select_mask } return self._annotation_select_cache else: return self.annotations @property def extra_select(self): if self._extra_select_cache is not None: return self._extra_select_cache if not self.extra: return {} elif self.extra_select_mask is not None: self._extra_select_cache = { k: v for k, v in self.extra.items() if k in self.extra_select_mask } return self._extra_select_cache else: return self.extra def trim_start(self, names_with_path): """ Trim joins from the start of the join path. The candidates for trim are the PathInfos in names_with_path structure that are m2m joins. Also set the select column so the start matches the join. This method is meant to be used for generating the subquery joins & cols in split_exclude(). Return a lookup usable for doing outerq.filter(lookup=self) and a boolean indicating if the joins in the prefix contain a LEFT OUTER join. 
_""" all_paths = [] for _, paths in names_with_path: all_paths.extend(paths) contains_louter = False # Trim and operate only on tables that were generated for # the lookup part of the query. That is, avoid trimming # joins generated for F() expressions. lookup_tables = [ t for t in self.alias_map if t in self._lookup_joins or t == self.base_table ] for trimmed_paths, path in enumerate(all_paths): if path.m2m: break if self.alias_map[lookup_tables[trimmed_paths + 1]].join_type == LOUTER: contains_louter = True alias = lookup_tables[trimmed_paths] self.unref_alias(alias) # The path.join_field is a Rel, lets get the other side's field join_field = path.join_field.field # Build the filter prefix. paths_in_prefix = trimmed_paths trimmed_prefix = [] for name, path in names_with_path: if paths_in_prefix - len(path) < 0: break trimmed_prefix.append(name) paths_in_prefix -= len(path) trimmed_prefix.append( join_field.foreign_related_fields[0].name) trimmed_prefix = LOOKUP_SEP.join(trimmed_prefix) # Lets still see if we can trim the first join from the inner query # (that is, self). We can't do this for: # - LEFT JOINs because we would miss those rows that have nothing on # the outer side, # - INNER JOINs from filtered relations because we would miss their # filters. first_join = self.alias_map[lookup_tables[trimmed_paths + 1]] if first_join.join_type != LOUTER and not first_join.filtered_relation: select_fields = [r[0] for r in join_field.related_fields] select_alias = lookup_tables[trimmed_paths + 1] self.unref_alias(lookup_tables[trimmed_paths]) extra_restriction = join_field.get_extra_restriction( self.where_class, None, lookup_tables[trimmed_paths + 1]) if extra_restriction: self.where.add(extra_restriction, AND) else: # TODO: It might be possible to trim more joins from the start of the # inner query if it happens to have a longer join chain containing the # values in select_fields. Lets punt this one for now. select_fields = [r[1] for r in join_field.related_fields] select_alias = lookup_tables[trimmed_paths] # The found starting point is likely a Join instead of a BaseTable reference. # But the first entry in the query's FROM clause must not be a JOIN. for table in self.alias_map: if self.alias_refcount[table] > 0: self.alias_map[table] = BaseTable(self.alias_map[table].table_name, table) break self.set_select([f.get_col(select_alias) for f in select_fields]) return trimmed_prefix, contains_louter def is_nullable(self, field): """ Check if the given field should be treated as nullable. Some backends treat '' as null and Django treats such fields as nullable for those backends. In such situations field.null can be False even if we should treat the field as nullable. """ # We need to use DEFAULT_DB_ALIAS here, as QuerySet does not have # (nor should it have) knowledge of which connection is going to be # used. The proper fix would be to defer all decisions where # is_nullable() is needed to the compiler stage, but that is not easy # to do currently. return ( connections[DEFAULT_DB_ALIAS].features.interprets_empty_strings_as_nulls and field.empty_strings_allowed ) or field.null def get_order_dir(field, default='ASC'): """ Return the field name and direction for an order specification. For example, '-foo' is returned as ('foo', 'DESC'). The 'default' param is used to indicate which way no prefix (or a '+' prefix) should sort. The '-' prefix always sorts the opposite way. 
""" dirn = ORDER_DIR[default] if field[0] == '-': return field[1:], dirn[1] return field, dirn[0] def add_to_dict(data, key, value): """ Add "value" to the set of values for "key", whether or not "key" already exists. """ if key in data: data[key].add(value) else: data[key] = {value} def is_reverse_o2o(field): """ Check if the given field is reverse-o2o. The field is expected to be some sort of relation field or related object. """ return field.is_relation and field.one_to_one and not field.concrete class JoinPromoter: """ A class to abstract away join promotion problems for complex filter conditions. """ def __init__(self, connector, num_children, negated): self.connector = connector self.negated = negated if self.negated: if connector == AND: self.effective_connector = OR else: self.effective_connector = AND else: self.effective_connector = self.connector self.num_children = num_children # Maps of table alias to how many times it is seen as required for # inner and/or outer joins. self.votes = Counter() def add_votes(self, votes): """ Add single vote per item to self.votes. Parameter can be any iterable. """ self.votes.update(votes) def update_join_types(self, query): """ Change join types so that the generated query is as efficient as possible, but still correct. So, change as many joins as possible to INNER, but don't make OUTER joins INNER if that could remove results from the query. """ to_promote = set() to_demote = set() # The effective_connector is used so that NOT (a AND b) is treated # similarly to (a OR b) for join promotion. for table, votes in self.votes.items(): # We must use outer joins in OR case when the join isn't contained # in all of the joins. Otherwise the INNER JOIN itself could remove # valid results. Consider the case where a model with rel_a and # rel_b relations is queried with rel_a__col=1 | rel_b__col=2. Now, # if rel_a join doesn't produce any results is null (for example # reverse foreign key or null value in direct foreign key), and # there is a matching row in rel_b with col=2, then an INNER join # to rel_a would remove a valid match from the query. So, we need # to promote any existing INNER to LOUTER (it is possible this # promotion in turn will be demoted later on). if self.effective_connector == 'OR' and votes < self.num_children: to_promote.add(table) # If connector is AND and there is a filter that can match only # when there is a joinable row, then use INNER. For example, in # rel_a__col=1 & rel_b__col=2, if either of the rels produce NULL # as join output, then the col=1 or col=2 can't match (as # NULL=anything is always false). # For the OR case, if all children voted for a join to be inner, # then we can use INNER for the join. For example: # (rel_a__col__icontains=Alex | rel_a__col__icontains=Russell) # then if rel_a doesn't produce any rows, the whole condition # can't match. Hence we can safely use INNER join. if self.effective_connector == 'AND' or ( self.effective_connector == 'OR' and votes == self.num_children): to_demote.add(table) # Finally, what happens in cases where we have: # (rel_a__col=1|rel_b__col=2) & rel_a__col__gte=0 # Now, we first generate the OR clause, and promote joins for it # in the first if branch above. Both rel_a and rel_b are promoted # to LOUTER joins. After that we do the AND case. The OR case # voted no inner joins but the rel_a__col__gte=0 votes inner join # for rel_a. We demote it back to INNER join (in AND case a single # vote is enough). 
The demotion is OK: if rel_a doesn't produce # rows, then the rel_a__col__gte=0 clause can't be true, and thus # the whole clause must be false. So, it is safe to use an INNER # join. # Note that in this example we could just as well have the __gte # clause and the OR clause swapped. Or we could replace the __gte # clause with an OR clause containing rel_a__col=1|rel_a__col=2, # and again we could safely demote to INNER. query.promote_joins(to_promote) query.demote_joins(to_demote) return to_demote
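# --- Illustrative sketch (not part of Django's source): how the JoinPromoter
# voting rules above play out. AND/OR are the module-level connector
# constants ('AND'/'OR') used throughout this file; _StubQuery is a
# hypothetical stand-in for a real Query that only records which aliases
# update_join_types() would promote to LOUTER or demote to INNER.

class _StubQuery:
    """Records promotion/demotion decisions instead of mutating real joins."""

    def __init__(self):
        self.promoted = set()
        self.demoted = set()

    def promote_joins(self, aliases):
        self.promoted |= set(aliases)

    def demote_joins(self, aliases):
        self.demoted |= set(aliases)


def _demo_join_promotion():
    # rel_a__col=1 | rel_b__col=2: each child votes only for its own alias,
    # so votes < num_children and both joins must be (or become) LOUTER.
    promoter = JoinPromoter(OR, num_children=2, negated=False)
    promoter.add_votes(['rel_a'])
    promoter.add_votes(['rel_b'])
    query = _StubQuery()
    promoter.update_join_types(query)
    assert query.promoted == {'rel_a', 'rel_b'}
    assert query.demoted == set()

    # rel_a__col=1 & rel_b__col=2: under AND a single vote is enough to
    # demote, so both joins can safely be INNER.
    promoter = JoinPromoter(AND, num_children=2, negated=False)
    promoter.add_votes(['rel_a'])
    promoter.add_votes(['rel_b'])
    query = _StubQuery()
    promoter.update_join_types(query)
    assert query.demoted == {'rel_a', 'rel_b'}

    # NOT (rel_a__col=1 & rel_b__col=2): negation flips the effective
    # connector to OR, so both joins are promoted instead.
    promoter = JoinPromoter(AND, num_children=2, negated=True)
    promoter.add_votes(['rel_a'])
    promoter.add_votes(['rel_b'])
    query = _StubQuery()
    promoter.update_join_types(query)
    assert query.promoted == {'rel_a', 'rel_b'}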
import copy import threading import time import warnings from collections import deque from contextlib import contextmanager import _thread import pytz from django.conf import settings from django.core.exceptions import ImproperlyConfigured from django.db import DEFAULT_DB_ALIAS from django.db.backends import utils from django.db.backends.base.validation import BaseDatabaseValidation from django.db.backends.signals import connection_created from django.db.transaction import TransactionManagementError from django.db.utils import DatabaseError, DatabaseErrorWrapper from django.utils import timezone from django.utils.asyncio import async_unsafe from django.utils.functional import cached_property NO_DB_ALIAS = '__no_db__' class BaseDatabaseWrapper: """Represent a database connection.""" # Mapping of Field objects to their column types. data_types = {} # Mapping of Field objects to their SQL suffix such as AUTOINCREMENT. data_types_suffix = {} # Mapping of Field objects to their SQL for CHECK constraints. data_type_check_constraints = {} ops = None vendor = 'unknown' display_name = 'unknown' SchemaEditorClass = None # Classes instantiated in __init__(). client_class = None creation_class = None features_class = None introspection_class = None ops_class = None validation_class = BaseDatabaseValidation queries_limit = 9000 def __init__(self, settings_dict, alias=DEFAULT_DB_ALIAS): # Connection related attributes. # The underlying database connection. self.connection = None # `settings_dict` should be a dictionary containing keys such as # NAME, USER, etc. It's called `settings_dict` instead of `settings` # to disambiguate it from Django settings modules. self.settings_dict = settings_dict self.alias = alias # Query logging in debug mode or when explicitly enabled. self.queries_log = deque(maxlen=self.queries_limit) self.force_debug_cursor = False # Transaction related attributes. # Tracks if the connection is in autocommit mode. Per PEP 249, by # default, it isn't. self.autocommit = False # Tracks if the connection is in a transaction managed by 'atomic'. self.in_atomic_block = False # Increment to generate unique savepoint ids. self.savepoint_state = 0 # List of savepoints created by 'atomic'. self.savepoint_ids = [] # Tracks if the outermost 'atomic' block should commit on exit, # ie. if autocommit was active on entry. self.commit_on_exit = True # Tracks if the transaction should be rolled back to the next # available savepoint because of an exception in an inner block. self.needs_rollback = False # Connection termination related attributes. self.close_at = None self.closed_in_transaction = False self.errors_occurred = False # Thread-safety related attributes. self._thread_sharing_lock = threading.Lock() self._thread_sharing_count = 0 self._thread_ident = _thread.get_ident() # A list of no-argument functions to run when the transaction commits. # Each entry is an (sids, func) tuple, where sids is a set of the # active savepoint IDs when this function was registered. self.run_on_commit = [] # Should we run the on-commit hooks the next time set_autocommit(True) # is called? self.run_commit_hooks_on_set_autocommit_on = False # A stack of wrappers to be invoked around execute()/executemany() # calls. Each entry is a function taking five arguments: execute, sql, # params, many, and context. It's the function's responsibility to # call execute(sql, params, many, context). 
self.execute_wrappers = [] self.client = self.client_class(self) self.creation = self.creation_class(self) self.features = self.features_class(self) self.introspection = self.introspection_class(self) self.ops = self.ops_class(self) self.validation = self.validation_class(self) def ensure_timezone(self): """ Ensure the connection's timezone is set to `self.timezone_name` and return whether it changed or not. """ return False @cached_property def timezone(self): """ Time zone for datetimes stored as naive values in the database. Return a tzinfo object or None. This is only needed when time zone support is enabled and the database doesn't support time zones. (When the database supports time zones, the adapter handles aware datetimes so Django doesn't need to.) """ if not settings.USE_TZ: return None elif self.features.supports_timezones: return None elif self.settings_dict['TIME_ZONE'] is None: return timezone.utc else: return pytz.timezone(self.settings_dict['TIME_ZONE']) @cached_property def timezone_name(self): """ Name of the time zone of the database connection. """ if not settings.USE_TZ: return settings.TIME_ZONE elif self.settings_dict['TIME_ZONE'] is None: return 'UTC' else: return self.settings_dict['TIME_ZONE'] @property def queries_logged(self): return self.force_debug_cursor or settings.DEBUG @property def queries(self): if len(self.queries_log) == self.queries_log.maxlen: warnings.warn( "Limit for query logging exceeded, only the last {} queries " "will be returned.".format(self.queries_log.maxlen)) return list(self.queries_log) # ##### Backend-specific methods for creating connections and cursors ##### def get_connection_params(self): """Return a dict of parameters suitable for get_new_connection.""" raise NotImplementedError('subclasses of BaseDatabaseWrapper may require a get_connection_params() method') def get_new_connection(self, conn_params): """Open a connection to the database.""" raise NotImplementedError('subclasses of BaseDatabaseWrapper may require a get_new_connection() method') def init_connection_state(self): """Initialize the database connection settings.""" raise NotImplementedError('subclasses of BaseDatabaseWrapper may require an init_connection_state() method') def create_cursor(self, name=None): """Create a cursor. Assume that a connection is established.""" raise NotImplementedError('subclasses of BaseDatabaseWrapper may require a create_cursor() method') # ##### Backend-specific methods for creating connections ##### @async_unsafe def connect(self): """Connect to the database. Assume that the connection is closed.""" # Check for invalid configurations. self.check_settings() # In case the previous connection was closed while in an atomic block self.in_atomic_block = False self.savepoint_ids = [] self.needs_rollback = False # Reset parameters defining when to close the connection max_age = self.settings_dict['CONN_MAX_AGE'] self.close_at = None if max_age is None else time.monotonic() + max_age self.closed_in_transaction = False self.errors_occurred = False # Establish the connection conn_params = self.get_connection_params() self.connection = self.get_new_connection(conn_params) self.set_autocommit(self.settings_dict['AUTOCOMMIT']) self.init_connection_state() connection_created.send(sender=self.__class__, connection=self) self.run_on_commit = [] def check_settings(self): if self.settings_dict['TIME_ZONE'] is not None: if not settings.USE_TZ: raise ImproperlyConfigured( "Connection '%s' cannot set TIME_ZONE because USE_TZ is " "False." 
% self.alias) elif self.features.supports_timezones: raise ImproperlyConfigured( "Connection '%s' cannot set TIME_ZONE because its engine " "handles time zone conversions natively." % self.alias) @async_unsafe def ensure_connection(self): """Guarantee that a connection to the database is established.""" if self.connection is None: with self.wrap_database_errors: self.connect() # ##### Backend-specific wrappers for PEP-249 connection methods ##### def _prepare_cursor(self, cursor): """ Validate that the connection is usable and perform database cursor wrapping. """ self.validate_thread_sharing() if self.queries_logged: wrapped_cursor = self.make_debug_cursor(cursor) else: wrapped_cursor = self.make_cursor(cursor) return wrapped_cursor def _cursor(self, name=None): self.ensure_connection() with self.wrap_database_errors: return self._prepare_cursor(self.create_cursor(name)) def _commit(self): if self.connection is not None: with self.wrap_database_errors: return self.connection.commit() def _rollback(self): if self.connection is not None: with self.wrap_database_errors: return self.connection.rollback() def _close(self): if self.connection is not None: with self.wrap_database_errors: return self.connection.close() # ##### Generic wrappers for PEP-249 connection methods ##### @async_unsafe def cursor(self): """Create a cursor, opening a connection if necessary.""" return self._cursor() @async_unsafe def commit(self): """Commit a transaction and reset the dirty flag.""" self.validate_thread_sharing() self.validate_no_atomic_block() self._commit() # A successful commit means that the database connection works. self.errors_occurred = False self.run_commit_hooks_on_set_autocommit_on = True @async_unsafe def rollback(self): """Roll back a transaction and reset the dirty flag.""" self.validate_thread_sharing() self.validate_no_atomic_block() self._rollback() # A successful rollback means that the database connection works. self.errors_occurred = False self.needs_rollback = False self.run_on_commit = [] @async_unsafe def close(self): """Close the connection to the database.""" self.validate_thread_sharing() self.run_on_commit = [] # Don't call validate_no_atomic_block() to avoid making it difficult # to get rid of a connection in an invalid state. The next connect() # will reset the transaction state anyway. if self.closed_in_transaction or self.connection is None: return try: self._close() finally: if self.in_atomic_block: self.closed_in_transaction = True self.needs_rollback = True else: self.connection = None # ##### Backend-specific savepoint management methods ##### def _savepoint(self, sid): with self.cursor() as cursor: cursor.execute(self.ops.savepoint_create_sql(sid)) def _savepoint_rollback(self, sid): with self.cursor() as cursor: cursor.execute(self.ops.savepoint_rollback_sql(sid)) def _savepoint_commit(self, sid): with self.cursor() as cursor: cursor.execute(self.ops.savepoint_commit_sql(sid)) def _savepoint_allowed(self): # Savepoints cannot be created outside a transaction. return self.features.uses_savepoints and not self.get_autocommit() # ##### Generic savepoint management methods ##### @async_unsafe def savepoint(self): """ Create a savepoint inside the current transaction. Return an identifier for the savepoint that will be used for the subsequent rollback or commit. Do nothing if savepoints are not supported.
""" if not self._savepoint_allowed(): return thread_ident = _thread.get_ident() tid = str(thread_ident).replace('-', '') self.savepoint_state += 1 sid = "s%s_x%d" % (tid, self.savepoint_state) self.validate_thread_sharing() self._savepoint(sid) return sid @async_unsafe def savepoint_rollback(self, sid): """ Roll back to a savepoint. Do nothing if savepoints are not supported. """ if not self._savepoint_allowed(): return self.validate_thread_sharing() self._savepoint_rollback(sid) # Remove any callbacks registered while this savepoint was active. self.run_on_commit = [ (sids, func) for (sids, func) in self.run_on_commit if sid not in sids ] @async_unsafe def savepoint_commit(self, sid): """ Release a savepoint. Do nothing if savepoints are not supported. """ if not self._savepoint_allowed(): return self.validate_thread_sharing() self._savepoint_commit(sid) @async_unsafe def clean_savepoints(self): """ Reset the counter used to generate unique savepoint ids in this thread. """ self.savepoint_state = 0 # ##### Backend-specific transaction management methods ##### def _set_autocommit(self, autocommit): """ Backend-specific implementation to enable or disable autocommit. """ raise NotImplementedError('subclasses of BaseDatabaseWrapper may require a _set_autocommit() method') # ##### Generic transaction management methods ##### def get_autocommit(self): """Get the autocommit state.""" self.ensure_connection() return self.autocommit def set_autocommit(self, autocommit, force_begin_transaction_with_broken_autocommit=False): """ Enable or disable autocommit. The usual way to start a transaction is to turn autocommit off. SQLite does not properly start a transaction when disabling autocommit. To avoid this buggy behavior and to actually enter a new transaction, an explcit BEGIN is required. Using force_begin_transaction_with_broken_autocommit=True will issue an explicit BEGIN with SQLite. This option will be ignored for other backends. """ self.validate_no_atomic_block() self.ensure_connection() start_transaction_under_autocommit = ( force_begin_transaction_with_broken_autocommit and not autocommit and hasattr(self, '_start_transaction_under_autocommit') ) if start_transaction_under_autocommit: self._start_transaction_under_autocommit() else: self._set_autocommit(autocommit) self.autocommit = autocommit if autocommit and self.run_commit_hooks_on_set_autocommit_on: self.run_and_clear_commit_hooks() self.run_commit_hooks_on_set_autocommit_on = False def get_rollback(self): """Get the "needs rollback" flag -- for *advanced use* only.""" if not self.in_atomic_block: raise TransactionManagementError( "The rollback flag doesn't work outside of an 'atomic' block.") return self.needs_rollback def set_rollback(self, rollback): """ Set or unset the "needs rollback" flag -- for *advanced use* only. """ if not self.in_atomic_block: raise TransactionManagementError( "The rollback flag doesn't work outside of an 'atomic' block.") self.needs_rollback = rollback def validate_no_atomic_block(self): """Raise an error if an atomic block is active.""" if self.in_atomic_block: raise TransactionManagementError( "This is forbidden when an 'atomic' block is active.") def validate_no_broken_transaction(self): if self.needs_rollback: raise TransactionManagementError( "An error occurred in the current transaction. 
You can't " "execute queries until the end of the 'atomic' block.") # ##### Foreign key constraints checks handling ##### @contextmanager def constraint_checks_disabled(self): """ Disable foreign key constraint checking. """ disabled = self.disable_constraint_checking() try: yield finally: if disabled: self.enable_constraint_checking() def disable_constraint_checking(self): """ Backends can implement as needed to temporarily disable foreign key constraint checking. Should return True if the constraints were disabled and will need to be reenabled. """ return False def enable_constraint_checking(self): """ Backends can implement as needed to re-enable foreign key constraint checking. """ pass def check_constraints(self, table_names=None): """ Backends can override this method if they can apply constraint checking (e.g. via "SET CONSTRAINTS ALL IMMEDIATE"). Should raise an IntegrityError if any invalid foreign key references are encountered. """ pass # ##### Connection termination handling ##### def is_usable(self): """ Test if the database connection is usable. This method may assume that self.connection is not None. Actual implementations should take care not to raise exceptions as that may prevent Django from recycling unusable connections. """ raise NotImplementedError( "subclasses of BaseDatabaseWrapper may require an is_usable() method") def close_if_unusable_or_obsolete(self): """ Close the current connection if unrecoverable errors have occurred or if it outlived its maximum age. """ if self.connection is not None: # If the application didn't restore the original autocommit setting, # don't take chances, drop the connection. if self.get_autocommit() != self.settings_dict['AUTOCOMMIT']: self.close() return # If an exception other than DataError or IntegrityError occurred # since the last commit / rollback, check if the connection works. if self.errors_occurred: if self.is_usable(): self.errors_occurred = False else: self.close() return if self.close_at is not None and time.monotonic() >= self.close_at: self.close() return # ##### Thread safety handling ##### @property def allow_thread_sharing(self): with self._thread_sharing_lock: return self._thread_sharing_count > 0 def inc_thread_sharing(self): with self._thread_sharing_lock: self._thread_sharing_count += 1 def dec_thread_sharing(self): with self._thread_sharing_lock: if self._thread_sharing_count <= 0: raise RuntimeError('Cannot decrement the thread sharing count below zero.') self._thread_sharing_count -= 1 def validate_thread_sharing(self): """ Validate that the connection isn't accessed by another thread than the one which originally created it, unless the connection was explicitly authorized to be shared between threads (via the `inc_thread_sharing()` method). Raise an exception if the validation fails. """ if not (self.allow_thread_sharing or self._thread_ident == _thread.get_ident()): raise DatabaseError( "DatabaseWrapper objects created in a " "thread can only be used in that same thread. The object " "with alias '%s' was created in thread id %s and this is " "thread id %s." % (self.alias, self._thread_ident, _thread.get_ident()) ) # ##### Miscellaneous ##### def prepare_database(self): """ Hook to do any database check or preparation, generally called before migrating a project or an app. """ pass @cached_property def wrap_database_errors(self): """ Context manager and decorator that re-throws backend-specific database exceptions using Django's common wrappers. 
""" return DatabaseErrorWrapper(self) def chunked_cursor(self): """ Return a cursor that tries to avoid caching in the database (if supported by the database), otherwise return a regular cursor. """ return self.cursor() def make_debug_cursor(self, cursor): """Create a cursor that logs all queries in self.queries_log.""" return utils.CursorDebugWrapper(cursor, self) def make_cursor(self, cursor): """Create a cursor without debug logging.""" return utils.CursorWrapper(cursor, self) @contextmanager def temporary_connection(self): """ Context manager that ensures that a connection is established, and if it opened one, closes it to avoid leaving a dangling connection. This is useful for operations outside of the request-response cycle. Provide a cursor: with self.temporary_connection() as cursor: ... """ must_close = self.connection is None try: with self.cursor() as cursor: yield cursor finally: if must_close: self.close() @property def _nodb_connection(self): """ Return an alternative connection to be used when there is no need to access the main database, specifically for test db creation/deletion. This also prevents the production database from being exposed to potential child threads while (or after) the test database is destroyed. Refs #10868, #17786, #16969. """ return self.__class__({**self.settings_dict, 'NAME': None}, alias=NO_DB_ALIAS) def schema_editor(self, *args, **kwargs): """ Return a new instance of this backend's SchemaEditor. """ if self.SchemaEditorClass is None: raise NotImplementedError( 'The SchemaEditorClass attribute of this database wrapper is still None') return self.SchemaEditorClass(self, *args, **kwargs) def on_commit(self, func): if self.in_atomic_block: # Transaction in progress; save for execution on commit. self.run_on_commit.append((set(self.savepoint_ids), func)) elif not self.get_autocommit(): raise TransactionManagementError('on_commit() cannot be used in manual transaction management') else: # No transaction in progress and in autocommit mode; execute # immediately. func() def run_and_clear_commit_hooks(self): self.validate_no_atomic_block() current_run_on_commit = self.run_on_commit self.run_on_commit = [] while current_run_on_commit: sids, func = current_run_on_commit.pop(0) func() @contextmanager def execute_wrapper(self, wrapper): """ Return a context manager under which the wrapper is applied to suitable database query executions. """ self.execute_wrappers.append(wrapper) try: yield finally: self.execute_wrappers.pop() def copy(self, alias=None): """ Return a copy of this connection. For tests that require two connections to the same database. """ settings_dict = copy.deepcopy(self.settings_dict) if alias is None: alias = self.alias return type(self)(settings_dict, alias)
import datetime import decimal import gettext as gettext_module import os import pickle import re import tempfile from contextlib import contextmanager from importlib import import_module from pathlib import Path from unittest import mock from asgiref.local import Local from django import forms from django.apps import AppConfig from django.conf import settings from django.conf.locale import LANG_INFO from django.conf.urls.i18n import i18n_patterns from django.template import Context, Template from django.test import ( RequestFactory, SimpleTestCase, TestCase, override_settings, ) from django.utils import translation from django.utils.deprecation import RemovedInDjango40Warning from django.utils.formats import ( date_format, get_format, get_format_modules, iter_format_modules, localize, localize_input, reset_format_cache, sanitize_separators, time_format, ) from django.utils.numberformat import format as nformat from django.utils.safestring import SafeString, mark_safe from django.utils.translation import ( LANGUAGE_SESSION_KEY, activate, check_for_language, deactivate, get_language, get_language_bidi, get_language_from_request, get_language_info, gettext, gettext_lazy, ngettext, ngettext_lazy, npgettext, npgettext_lazy, pgettext, round_away_from_one, to_language, to_locale, trans_null, trans_real, ugettext, ugettext_lazy, ugettext_noop, ungettext, ungettext_lazy, ) from django.utils.translation.reloader import ( translation_file_changed, watch_for_translation_changes, ) from .forms import CompanyForm, I18nForm, SelectDateForm from .models import Company, TestModel here = os.path.dirname(os.path.abspath(__file__)) extended_locale_paths = settings.LOCALE_PATHS + [ os.path.join(here, 'other', 'locale'), ] class AppModuleStub: def __init__(self, **kwargs): self.__dict__.update(kwargs) @contextmanager def patch_formats(lang, **settings): from django.utils.formats import _format_cache # Populate _format_cache with temporary values for key, value in settings.items(): _format_cache[(key, lang)] = value try: yield finally: reset_format_cache() class TranslationTests(SimpleTestCase): @translation.override('de') def test_legacy_aliases(self): """ Pre-Django 2.0 aliases with u prefix are still available. """ msg = ( 'django.utils.translation.ugettext_noop() is deprecated in favor ' 'of django.utils.translation.gettext_noop().' ) with self.assertWarnsMessage(RemovedInDjango40Warning, msg): self.assertEqual(ugettext_noop("Image"), "Image") msg = ( 'django.utils.translation.ugettext() is deprecated in favor of ' 'django.utils.translation.gettext().' ) with self.assertWarnsMessage(RemovedInDjango40Warning, msg): self.assertEqual(ugettext("Image"), "Bild") msg = ( 'django.utils.translation.ugettext_lazy() is deprecated in favor ' 'of django.utils.translation.gettext_lazy().' ) with self.assertWarnsMessage(RemovedInDjango40Warning, msg): self.assertEqual(ugettext_lazy("Image"), gettext_lazy("Image")) msg = ( 'django.utils.translation.ungettext() is deprecated in favor of ' 'django.utils.translation.ngettext().' ) with self.assertWarnsMessage(RemovedInDjango40Warning, msg): self.assertEqual(ungettext("%d year", "%d years", 0) % 0, "0 Jahre") msg = ( 'django.utils.translation.ungettext_lazy() is deprecated in favor ' 'of django.utils.translation.ngettext_lazy().' 
) with self.assertWarnsMessage(RemovedInDjango40Warning, msg): self.assertEqual( ungettext_lazy("%d year", "%d years", 0) % 0, ngettext_lazy("%d year", "%d years", 0) % 0, ) @translation.override('fr') def test_plural(self): """ Test plurals with ngettext. French differs from English in that 0 is singular. """ self.assertEqual(ngettext("%d year", "%d years", 0) % 0, "0 année") self.assertEqual(ngettext("%d year", "%d years", 2) % 2, "2 années") self.assertEqual(ngettext("%(size)d byte", "%(size)d bytes", 0) % {'size': 0}, "0 octet") self.assertEqual(ngettext("%(size)d byte", "%(size)d bytes", 2) % {'size': 2}, "2 octets") def test_plural_null(self): g = trans_null.ngettext self.assertEqual(g('%d year', '%d years', 0) % 0, '0 years') self.assertEqual(g('%d year', '%d years', 1) % 1, '1 year') self.assertEqual(g('%d year', '%d years', 2) % 2, '2 years') def test_override(self): activate('de') try: with translation.override('pl'): self.assertEqual(get_language(), 'pl') self.assertEqual(get_language(), 'de') with translation.override(None): self.assertIsNone(get_language()) with translation.override('pl'): pass self.assertIsNone(get_language()) self.assertEqual(get_language(), 'de') finally: deactivate() def test_override_decorator(self): @translation.override('pl') def func_pl(): self.assertEqual(get_language(), 'pl') @translation.override(None) def func_none(): self.assertIsNone(get_language()) try: activate('de') func_pl() self.assertEqual(get_language(), 'de') func_none() self.assertEqual(get_language(), 'de') finally: deactivate() def test_override_exit(self): """ The language restored is the one used when the function was called, not the one used when the decorator was initialized (#23381). """ activate('fr') @translation.override('pl') def func_pl(): pass deactivate() try: activate('en') func_pl() self.assertEqual(get_language(), 'en') finally: deactivate() def test_lazy_objects(self): """ Format string interpolation should work with *_lazy objects. """ s = gettext_lazy('Add %(name)s') d = {'name': 'Ringo'} self.assertEqual('Add Ringo', s % d) with translation.override('de', deactivate=True): self.assertEqual('Ringo hinzuf\xfcgen', s % d) with translation.override('pl'): self.assertEqual('Dodaj Ringo', s % d) # It should be possible to compare *_lazy objects. 
s1 = gettext_lazy('Add %(name)s') self.assertEqual(s, s1) s2 = gettext_lazy('Add %(name)s') s3 = gettext_lazy('Add %(name)s') self.assertEqual(s2, s3) self.assertEqual(s, s2) s4 = gettext_lazy('Some other string') self.assertNotEqual(s, s4) def test_lazy_pickle(self): s1 = gettext_lazy("test") self.assertEqual(str(s1), "test") s2 = pickle.loads(pickle.dumps(s1)) self.assertEqual(str(s2), "test") @override_settings(LOCALE_PATHS=extended_locale_paths) def test_ngettext_lazy(self): simple_with_format = ngettext_lazy('%d good result', '%d good results') simple_context_with_format = npgettext_lazy('Exclamation', '%d good result', '%d good results') simple_without_format = ngettext_lazy('good result', 'good results') with translation.override('de'): self.assertEqual(simple_with_format % 1, '1 gutes Resultat') self.assertEqual(simple_with_format % 4, '4 guten Resultate') self.assertEqual(simple_context_with_format % 1, '1 gutes Resultat!') self.assertEqual(simple_context_with_format % 4, '4 guten Resultate!') self.assertEqual(simple_without_format % 1, 'gutes Resultat') self.assertEqual(simple_without_format % 4, 'guten Resultate') complex_nonlazy = ngettext_lazy('Hi %(name)s, %(num)d good result', 'Hi %(name)s, %(num)d good results', 4) complex_deferred = ngettext_lazy( 'Hi %(name)s, %(num)d good result', 'Hi %(name)s, %(num)d good results', 'num' ) complex_context_nonlazy = npgettext_lazy( 'Greeting', 'Hi %(name)s, %(num)d good result', 'Hi %(name)s, %(num)d good results', 4 ) complex_context_deferred = npgettext_lazy( 'Greeting', 'Hi %(name)s, %(num)d good result', 'Hi %(name)s, %(num)d good results', 'num' ) with translation.override('de'): self.assertEqual(complex_nonlazy % {'num': 4, 'name': 'Jim'}, 'Hallo Jim, 4 guten Resultate') self.assertEqual(complex_deferred % {'name': 'Jim', 'num': 1}, 'Hallo Jim, 1 gutes Resultat') self.assertEqual(complex_deferred % {'name': 'Jim', 'num': 5}, 'Hallo Jim, 5 guten Resultate') with self.assertRaisesMessage(KeyError, 'Your dictionary lacks key'): complex_deferred % {'name': 'Jim'} self.assertEqual(complex_context_nonlazy % {'num': 4, 'name': 'Jim'}, 'Willkommen Jim, 4 guten Resultate') self.assertEqual(complex_context_deferred % {'name': 'Jim', 'num': 1}, 'Willkommen Jim, 1 gutes Resultat') self.assertEqual(complex_context_deferred % {'name': 'Jim', 'num': 5}, 'Willkommen Jim, 5 guten Resultate') with self.assertRaisesMessage(KeyError, 'Your dictionary lacks key'): complex_context_deferred % {'name': 'Jim'} @override_settings(LOCALE_PATHS=extended_locale_paths) def test_ngettext_lazy_format_style(self): simple_with_format = ngettext_lazy('{} good result', '{} good results') simple_context_with_format = npgettext_lazy('Exclamation', '{} good result', '{} good results') with translation.override('de'): self.assertEqual(simple_with_format.format(1), '1 gutes Resultat') self.assertEqual(simple_with_format.format(4), '4 guten Resultate') self.assertEqual(simple_context_with_format.format(1), '1 gutes Resultat!') self.assertEqual(simple_context_with_format.format(4), '4 guten Resultate!') complex_nonlazy = ngettext_lazy('Hi {name}, {num} good result', 'Hi {name}, {num} good results', 4) complex_deferred = ngettext_lazy( 'Hi {name}, {num} good result', 'Hi {name}, {num} good results', 'num' ) complex_context_nonlazy = npgettext_lazy( 'Greeting', 'Hi {name}, {num} good result', 'Hi {name}, {num} good results', 4 ) complex_context_deferred = npgettext_lazy( 'Greeting', 'Hi {name}, {num} good result', 'Hi {name}, {num} good results', 'num' ) with 
translation.override('de'): self.assertEqual(complex_nonlazy.format(num=4, name='Jim'), 'Hallo Jim, 4 guten Resultate') self.assertEqual(complex_deferred.format(name='Jim', num=1), 'Hallo Jim, 1 gutes Resultat') self.assertEqual(complex_deferred.format(name='Jim', num=5), 'Hallo Jim, 5 guten Resultate') with self.assertRaisesMessage(KeyError, 'Your dictionary lacks key'): complex_deferred.format(name='Jim') self.assertEqual(complex_context_nonlazy.format(num=4, name='Jim'), 'Willkommen Jim, 4 guten Resultate') self.assertEqual(complex_context_deferred.format(name='Jim', num=1), 'Willkommen Jim, 1 gutes Resultat') self.assertEqual(complex_context_deferred.format(name='Jim', num=5), 'Willkommen Jim, 5 guten Resultate') with self.assertRaisesMessage(KeyError, 'Your dictionary lacks key'): complex_context_deferred.format(name='Jim') def test_ngettext_lazy_bool(self): self.assertTrue(ngettext_lazy('%d good result', '%d good results')) self.assertFalse(ngettext_lazy('', '')) def test_ngettext_lazy_pickle(self): s1 = ngettext_lazy('%d good result', '%d good results') self.assertEqual(s1 % 1, '1 good result') self.assertEqual(s1 % 8, '8 good results') s2 = pickle.loads(pickle.dumps(s1)) self.assertEqual(s2 % 1, '1 good result') self.assertEqual(s2 % 8, '8 good results') @override_settings(LOCALE_PATHS=extended_locale_paths) def test_pgettext(self): trans_real._active = Local() trans_real._translations = {} with translation.override('de'): self.assertEqual(pgettext("unexisting", "May"), "May") self.assertEqual(pgettext("month name", "May"), "Mai") self.assertEqual(pgettext("verb", "May"), "Kann") self.assertEqual(npgettext("search", "%d result", "%d results", 4) % 4, "4 Resultate") def test_empty_value(self): """Empty value must stay empty after being translated (#23196).""" with translation.override('de'): self.assertEqual('', gettext('')) s = mark_safe('') self.assertEqual(s, gettext(s)) @override_settings(LOCALE_PATHS=extended_locale_paths) def test_safe_status(self): """ Translating a string requiring no auto-escaping with gettext or pgettext shouldn't change the "safe" status. """ trans_real._active = Local() trans_real._translations = {} s1 = mark_safe('Password') s2 = mark_safe('May') with translation.override('de', deactivate=True): self.assertIs(type(gettext(s1)), SafeString) self.assertIs(type(pgettext('month name', s2)), SafeString) self.assertEqual('aPassword', SafeString('a') + s1) self.assertEqual('Passworda', s1 + SafeString('a')) self.assertEqual('Passworda', s1 + mark_safe('a')) self.assertEqual('aPassword', mark_safe('a') + s1) self.assertEqual('as', mark_safe('a') + mark_safe('s')) def test_maclines(self): """ Translations on files with Mac or DOS end of lines will be converted to unix EOF in .po catalogs. """ ca_translation = trans_real.translation('ca') ca_translation._catalog['Mac\nEOF\n'] = 'Catalan Mac\nEOF\n' ca_translation._catalog['Win\nEOF\n'] = 'Catalan Win\nEOF\n' with translation.override('ca', deactivate=True): self.assertEqual('Catalan Mac\nEOF\n', gettext('Mac\rEOF\r')) self.assertEqual('Catalan Win\nEOF\n', gettext('Win\r\nEOF\r\n')) def test_to_locale(self): tests = ( ('en', 'en'), ('EN', 'en'), ('en-us', 'en_US'), ('EN-US', 'en_US'), # With > 2 characters after the dash. ('sr-latn', 'sr_Latn'), ('sr-LATN', 'sr_Latn'), # With private use subtag (x-informal). 
('nl-nl-x-informal', 'nl_NL-x-informal'), ('NL-NL-X-INFORMAL', 'nl_NL-x-informal'), ('sr-latn-x-informal', 'sr_Latn-x-informal'), ('SR-LATN-X-INFORMAL', 'sr_Latn-x-informal'), ) for lang, locale in tests: with self.subTest(lang=lang): self.assertEqual(to_locale(lang), locale) def test_to_language(self): self.assertEqual(to_language('en_US'), 'en-us') self.assertEqual(to_language('sr_Lat'), 'sr-lat') def test_language_bidi(self): self.assertIs(get_language_bidi(), False) with translation.override(None): self.assertIs(get_language_bidi(), False) def test_language_bidi_null(self): self.assertIs(trans_null.get_language_bidi(), False) with override_settings(LANGUAGE_CODE='he'): self.assertIs(get_language_bidi(), True) class TranslationThreadSafetyTests(SimpleTestCase): def setUp(self): self._old_language = get_language() self._translations = trans_real._translations # here we rely on .split() being called inside the _fetch() # in trans_real.translation() class sideeffect_str(str): def split(self, *args, **kwargs): res = str.split(self, *args, **kwargs) trans_real._translations['en-YY'] = None return res trans_real._translations = {sideeffect_str('en-XX'): None} def tearDown(self): trans_real._translations = self._translations activate(self._old_language) def test_bug14894_translation_activate_thread_safety(self): translation_count = len(trans_real._translations) # May raise RuntimeError if translation.activate() isn't thread-safe. translation.activate('pl') # make sure sideeffect_str actually added a new translation self.assertLess(translation_count, len(trans_real._translations)) @override_settings(USE_L10N=True) class FormattingTests(SimpleTestCase): def setUp(self): super().setUp() self.n = decimal.Decimal('66666.666') self.f = 99999.999 self.d = datetime.date(2009, 12, 31) self.dt = datetime.datetime(2009, 12, 31, 20, 50) self.t = datetime.time(10, 15, 48) self.long = 10000 self.ctxt = Context({ 'n': self.n, 't': self.t, 'd': self.d, 'dt': self.dt, 'f': self.f, 'l': self.long, }) def test_all_format_strings(self): all_locales = LANG_INFO.keys() some_date = datetime.date(2017, 10, 14) some_datetime = datetime.datetime(2017, 10, 14, 10, 23) for locale in all_locales: with self.subTest(locale=locale), translation.override(locale): self.assertIn('2017', date_format(some_date)) # Uses DATE_FORMAT by default self.assertIn('23', time_format(some_datetime)) # Uses TIME_FORMAT by default self.assertIn('2017', date_format(some_datetime, format=get_format('DATETIME_FORMAT'))) self.assertIn('2017', date_format(some_date, format=get_format('YEAR_MONTH_FORMAT'))) self.assertIn('14', date_format(some_date, format=get_format('MONTH_DAY_FORMAT'))) self.assertIn('2017', date_format(some_date, format=get_format('SHORT_DATE_FORMAT'))) self.assertIn('2017', date_format(some_datetime, format=get_format('SHORT_DATETIME_FORMAT'))) def test_locale_independent(self): """ Localization of numbers """ with self.settings(USE_THOUSAND_SEPARATOR=False): self.assertEqual('66666.66', nformat(self.n, decimal_sep='.', decimal_pos=2, grouping=3, thousand_sep=',')) self.assertEqual('66666A6', nformat(self.n, decimal_sep='A', decimal_pos=1, grouping=1, thousand_sep='B')) self.assertEqual('66666', nformat(self.n, decimal_sep='X', decimal_pos=0, grouping=1, thousand_sep='Y')) with self.settings(USE_THOUSAND_SEPARATOR=True): self.assertEqual( '66,666.66', nformat(self.n, decimal_sep='.', decimal_pos=2, grouping=3, thousand_sep=',') ) self.assertEqual( '6B6B6B6B6A6', nformat(self.n, decimal_sep='A', decimal_pos=1, grouping=1, 
thousand_sep='B') ) self.assertEqual('-66666.6', nformat(-66666.666, decimal_sep='.', decimal_pos=1)) self.assertEqual('-66666.0', nformat(int('-66666'), decimal_sep='.', decimal_pos=1)) self.assertEqual('10000.0', nformat(self.long, decimal_sep='.', decimal_pos=1)) self.assertEqual( '10,00,00,000.00', nformat(100000000.00, decimal_sep='.', decimal_pos=2, grouping=(3, 2, 0), thousand_sep=',') ) self.assertEqual( '1,0,00,000,0000.00', nformat(10000000000.00, decimal_sep='.', decimal_pos=2, grouping=(4, 3, 2, 1, 0), thousand_sep=',') ) self.assertEqual( '10000,00,000.00', nformat(1000000000.00, decimal_sep='.', decimal_pos=2, grouping=(3, 2, -1), thousand_sep=',') ) # This unusual grouping/force_grouping combination may be triggered by the intcomma filter (#17414) self.assertEqual( '10000', nformat(self.long, decimal_sep='.', decimal_pos=0, grouping=0, force_grouping=True) ) # date filter self.assertEqual('31.12.2009 в 20:50', Template('{{ dt|date:"d.m.Y в H:i" }}').render(self.ctxt)) self.assertEqual('⌚ 10:15', Template('{{ t|time:"⌚ H:i" }}').render(self.ctxt)) @override_settings(USE_L10N=False) def test_l10n_disabled(self): """ Catalan locale with format localization disabled: translations will be used, but not formats. """ with translation.override('ca', deactivate=True): self.maxDiff = 3000 self.assertEqual('N j, Y', get_format('DATE_FORMAT')) self.assertEqual(0, get_format('FIRST_DAY_OF_WEEK')) self.assertEqual('.', get_format('DECIMAL_SEPARATOR')) self.assertEqual('10:15 a.m.', time_format(self.t)) self.assertEqual('des. 31, 2009', date_format(self.d)) self.assertEqual('desembre 2009', date_format(self.d, 'YEAR_MONTH_FORMAT')) self.assertEqual('12/31/2009 8:50 p.m.', date_format(self.dt, 'SHORT_DATETIME_FORMAT')) self.assertEqual('No localizable', localize('No localizable')) self.assertEqual('66666.666', localize(self.n)) self.assertEqual('99999.999', localize(self.f)) self.assertEqual('10000', localize(self.long)) self.assertEqual('des. 31, 2009', localize(self.d)) self.assertEqual('des. 31, 2009, 8:50 p.m.', localize(self.dt)) self.assertEqual('66666.666', Template('{{ n }}').render(self.ctxt)) self.assertEqual('99999.999', Template('{{ f }}').render(self.ctxt)) self.assertEqual('des. 31, 2009', Template('{{ d }}').render(self.ctxt)) self.assertEqual('des.
31, 2009, 8:50 p.m.', Template('{{ dt }}').render(self.ctxt)) self.assertEqual('66666.67', Template('{{ n|floatformat:2 }}').render(self.ctxt)) self.assertEqual('100000.0', Template('{{ f|floatformat }}').render(self.ctxt)) self.assertEqual('10:15 a.m.', Template('{{ t|time:"TIME_FORMAT" }}').render(self.ctxt)) self.assertEqual('12/31/2009', Template('{{ d|date:"SHORT_DATE_FORMAT" }}').render(self.ctxt)) self.assertEqual( '12/31/2009 8:50 p.m.', Template('{{ dt|date:"SHORT_DATETIME_FORMAT" }}').render(self.ctxt) ) form = I18nForm({ 'decimal_field': '66666,666', 'float_field': '99999,999', 'date_field': '31/12/2009', 'datetime_field': '31/12/2009 20:50', 'time_field': '20:50', 'integer_field': '1.234', }) self.assertFalse(form.is_valid()) self.assertEqual(['Introdu\xefu un n\xfamero.'], form.errors['float_field']) self.assertEqual(['Introdu\xefu un n\xfamero.'], form.errors['decimal_field']) self.assertEqual(['Introdu\xefu una data v\xe0lida.'], form.errors['date_field']) self.assertEqual(['Introdu\xefu una data/hora v\xe0lides.'], form.errors['datetime_field']) self.assertEqual(['Introdu\xefu un n\xfamero sencer.'], form.errors['integer_field']) form2 = SelectDateForm({ 'date_field_month': '12', 'date_field_day': '31', 'date_field_year': '2009' }) self.assertTrue(form2.is_valid()) self.assertEqual(datetime.date(2009, 12, 31), form2.cleaned_data['date_field']) self.assertHTMLEqual( '<select name="mydate_month" id="id_mydate_month">' '<option value="">---</option>' '<option value="1">gener</option>' '<option value="2">febrer</option>' '<option value="3">mar\xe7</option>' '<option value="4">abril</option>' '<option value="5">maig</option>' '<option value="6">juny</option>' '<option value="7">juliol</option>' '<option value="8">agost</option>' '<option value="9">setembre</option>' '<option value="10">octubre</option>' '<option value="11">novembre</option>' '<option value="12" selected>desembre</option>' '</select>' '<select name="mydate_day" id="id_mydate_day">' '<option value="">---</option>' '<option value="1">1</option>' '<option value="2">2</option>' '<option value="3">3</option>' '<option value="4">4</option>' '<option value="5">5</option>' '<option value="6">6</option>' '<option value="7">7</option>' '<option value="8">8</option>' '<option value="9">9</option>' '<option value="10">10</option>' '<option value="11">11</option>' '<option value="12">12</option>' '<option value="13">13</option>' '<option value="14">14</option>' '<option value="15">15</option>' '<option value="16">16</option>' '<option value="17">17</option>' '<option value="18">18</option>' '<option value="19">19</option>' '<option value="20">20</option>' '<option value="21">21</option>' '<option value="22">22</option>' '<option value="23">23</option>' '<option value="24">24</option>' '<option value="25">25</option>' '<option value="26">26</option>' '<option value="27">27</option>' '<option value="28">28</option>' '<option value="29">29</option>' '<option value="30">30</option>' '<option value="31" selected>31</option>' '</select>' '<select name="mydate_year" id="id_mydate_year">' '<option value="">---</option>' '<option value="2009" selected>2009</option>' '<option value="2010">2010</option>' '<option value="2011">2011</option>' '<option value="2012">2012</option>' '<option value="2013">2013</option>' '<option value="2014">2014</option>' '<option value="2015">2015</option>' '<option value="2016">2016</option>' '<option value="2017">2017</option>' '<option value="2018">2018</option>' '</select>', 
forms.SelectDateWidget(years=range(2009, 2019)).render('mydate', datetime.date(2009, 12, 31)) ) # We shouldn't change the behavior of the floatformat filter re: # thousand separator and grouping when USE_L10N is False even # if the USE_THOUSAND_SEPARATOR, NUMBER_GROUPING and # THOUSAND_SEPARATOR settings are specified with self.settings(USE_THOUSAND_SEPARATOR=True, NUMBER_GROUPING=1, THOUSAND_SEPARATOR='!'): self.assertEqual('66666.67', Template('{{ n|floatformat:2 }}').render(self.ctxt)) self.assertEqual('100000.0', Template('{{ f|floatformat }}').render(self.ctxt)) def test_false_like_locale_formats(self): """ The active locale's formats take precedence over the default settings even if they would be interpreted as False in a conditional test (e.g. 0 or empty string) (#16938). """ with translation.override('fr'): with self.settings(USE_THOUSAND_SEPARATOR=True, THOUSAND_SEPARATOR='!'): self.assertEqual('\xa0', get_format('THOUSAND_SEPARATOR')) # Even a second time (after the format has been cached)... self.assertEqual('\xa0', get_format('THOUSAND_SEPARATOR')) with self.settings(FIRST_DAY_OF_WEEK=0): self.assertEqual(1, get_format('FIRST_DAY_OF_WEEK')) # Even a second time (after the format has been cached)... self.assertEqual(1, get_format('FIRST_DAY_OF_WEEK')) def test_l10n_enabled(self): self.maxDiff = 3000 # Catalan locale with translation.override('ca', deactivate=True): self.assertEqual(r'j \d\e F \d\e Y', get_format('DATE_FORMAT')) self.assertEqual(1, get_format('FIRST_DAY_OF_WEEK')) self.assertEqual(',', get_format('DECIMAL_SEPARATOR')) self.assertEqual('10:15', time_format(self.t)) self.assertEqual('31 de desembre de 2009', date_format(self.d)) self.assertEqual('desembre del 2009', date_format(self.d, 'YEAR_MONTH_FORMAT')) self.assertEqual('31/12/2009 20:50', date_format(self.dt, 'SHORT_DATETIME_FORMAT')) self.assertEqual('No localizable', localize('No localizable')) with self.settings(USE_THOUSAND_SEPARATOR=True): self.assertEqual('66.666,666', localize(self.n)) self.assertEqual('99.999,999', localize(self.f)) self.assertEqual('10.000', localize(self.long)) self.assertEqual('True', localize(True)) with self.settings(USE_THOUSAND_SEPARATOR=False): self.assertEqual('66666,666', localize(self.n)) self.assertEqual('99999,999', localize(self.f)) self.assertEqual('10000', localize(self.long)) self.assertEqual('31 de desembre de 2009', localize(self.d)) self.assertEqual('31 de desembre de 2009 a les 20:50', localize(self.dt)) with self.settings(USE_THOUSAND_SEPARATOR=True): self.assertEqual('66.666,666', Template('{{ n }}').render(self.ctxt)) self.assertEqual('99.999,999', Template('{{ f }}').render(self.ctxt)) self.assertEqual('10.000', Template('{{ l }}').render(self.ctxt)) with self.settings(USE_THOUSAND_SEPARATOR=True): form3 = I18nForm({ 'decimal_field': '66.666,666', 'float_field': '99.999,999', 'date_field': '31/12/2009', 'datetime_field': '31/12/2009 20:50', 'time_field': '20:50', 'integer_field': '1.234', }) self.assertTrue(form3.is_valid()) self.assertEqual(decimal.Decimal('66666.666'), form3.cleaned_data['decimal_field']) self.assertEqual(99999.999, form3.cleaned_data['float_field']) self.assertEqual(datetime.date(2009, 12, 31), form3.cleaned_data['date_field']) self.assertEqual(datetime.datetime(2009, 12, 31, 20, 50), form3.cleaned_data['datetime_field']) self.assertEqual(datetime.time(20, 50), form3.cleaned_data['time_field']) self.assertEqual(1234, form3.cleaned_data['integer_field']) with self.settings(USE_THOUSAND_SEPARATOR=False): self.assertEqual('66666,666', 
Template('{{ n }}').render(self.ctxt)) self.assertEqual('99999,999', Template('{{ f }}').render(self.ctxt)) self.assertEqual('31 de desembre de 2009', Template('{{ d }}').render(self.ctxt)) self.assertEqual('31 de desembre de 2009 a les 20:50', Template('{{ dt }}').render(self.ctxt)) self.assertEqual('66666,67', Template('{{ n|floatformat:2 }}').render(self.ctxt)) self.assertEqual('100000,0', Template('{{ f|floatformat }}').render(self.ctxt)) self.assertEqual('10:15', Template('{{ t|time:"TIME_FORMAT" }}').render(self.ctxt)) self.assertEqual('31/12/2009', Template('{{ d|date:"SHORT_DATE_FORMAT" }}').render(self.ctxt)) self.assertEqual( '31/12/2009 20:50', Template('{{ dt|date:"SHORT_DATETIME_FORMAT" }}').render(self.ctxt) ) self.assertEqual(date_format(datetime.datetime.now(), "DATE_FORMAT"), Template('{% now "DATE_FORMAT" %}').render(self.ctxt)) with self.settings(USE_THOUSAND_SEPARATOR=False): form4 = I18nForm({ 'decimal_field': '66666,666', 'float_field': '99999,999', 'date_field': '31/12/2009', 'datetime_field': '31/12/2009 20:50', 'time_field': '20:50', 'integer_field': '1234', }) self.assertTrue(form4.is_valid()) self.assertEqual(decimal.Decimal('66666.666'), form4.cleaned_data['decimal_field']) self.assertEqual(99999.999, form4.cleaned_data['float_field']) self.assertEqual(datetime.date(2009, 12, 31), form4.cleaned_data['date_field']) self.assertEqual(datetime.datetime(2009, 12, 31, 20, 50), form4.cleaned_data['datetime_field']) self.assertEqual(datetime.time(20, 50), form4.cleaned_data['time_field']) self.assertEqual(1234, form4.cleaned_data['integer_field']) form5 = SelectDateForm({ 'date_field_month': '12', 'date_field_day': '31', 'date_field_year': '2009' }) self.assertTrue(form5.is_valid()) self.assertEqual(datetime.date(2009, 12, 31), form5.cleaned_data['date_field']) self.assertHTMLEqual( '<select name="mydate_day" id="id_mydate_day">' '<option value="">---</option>' '<option value="1">1</option>' '<option value="2">2</option>' '<option value="3">3</option>' '<option value="4">4</option>' '<option value="5">5</option>' '<option value="6">6</option>' '<option value="7">7</option>' '<option value="8">8</option>' '<option value="9">9</option>' '<option value="10">10</option>' '<option value="11">11</option>' '<option value="12">12</option>' '<option value="13">13</option>' '<option value="14">14</option>' '<option value="15">15</option>' '<option value="16">16</option>' '<option value="17">17</option>' '<option value="18">18</option>' '<option value="19">19</option>' '<option value="20">20</option>' '<option value="21">21</option>' '<option value="22">22</option>' '<option value="23">23</option>' '<option value="24">24</option>' '<option value="25">25</option>' '<option value="26">26</option>' '<option value="27">27</option>' '<option value="28">28</option>' '<option value="29">29</option>' '<option value="30">30</option>' '<option value="31" selected>31</option>' '</select>' '<select name="mydate_month" id="id_mydate_month">' '<option value="">---</option>' '<option value="1">gener</option>' '<option value="2">febrer</option>' '<option value="3">mar\xe7</option>' '<option value="4">abril</option>' '<option value="5">maig</option>' '<option value="6">juny</option>' '<option value="7">juliol</option>' '<option value="8">agost</option>' '<option value="9">setembre</option>' '<option value="10">octubre</option>' '<option value="11">novembre</option>' '<option value="12" selected>desembre</option>' '</select>' '<select name="mydate_year" id="id_mydate_year">' '<option 
value="">---</option>' '<option value="2009" selected>2009</option>' '<option value="2010">2010</option>' '<option value="2011">2011</option>' '<option value="2012">2012</option>' '<option value="2013">2013</option>' '<option value="2014">2014</option>' '<option value="2015">2015</option>' '<option value="2016">2016</option>' '<option value="2017">2017</option>' '<option value="2018">2018</option>' '</select>', forms.SelectDateWidget(years=range(2009, 2019)).render('mydate', datetime.date(2009, 12, 31)) ) # Russian locale (with E as month) with translation.override('ru', deactivate=True): self.assertHTMLEqual( '<select name="mydate_day" id="id_mydate_day">' '<option value="">---</option>' '<option value="1">1</option>' '<option value="2">2</option>' '<option value="3">3</option>' '<option value="4">4</option>' '<option value="5">5</option>' '<option value="6">6</option>' '<option value="7">7</option>' '<option value="8">8</option>' '<option value="9">9</option>' '<option value="10">10</option>' '<option value="11">11</option>' '<option value="12">12</option>' '<option value="13">13</option>' '<option value="14">14</option>' '<option value="15">15</option>' '<option value="16">16</option>' '<option value="17">17</option>' '<option value="18">18</option>' '<option value="19">19</option>' '<option value="20">20</option>' '<option value="21">21</option>' '<option value="22">22</option>' '<option value="23">23</option>' '<option value="24">24</option>' '<option value="25">25</option>' '<option value="26">26</option>' '<option value="27">27</option>' '<option value="28">28</option>' '<option value="29">29</option>' '<option value="30">30</option>' '<option value="31" selected>31</option>' '</select>' '<select name="mydate_month" id="id_mydate_month">' '<option value="">---</option>' '<option value="1">\u042f\u043d\u0432\u0430\u0440\u044c</option>' '<option value="2">\u0424\u0435\u0432\u0440\u0430\u043b\u044c</option>' '<option value="3">\u041c\u0430\u0440\u0442</option>' '<option value="4">\u0410\u043f\u0440\u0435\u043b\u044c</option>' '<option value="5">\u041c\u0430\u0439</option>' '<option value="6">\u0418\u044e\u043d\u044c</option>' '<option value="7">\u0418\u044e\u043b\u044c</option>' '<option value="8">\u0410\u0432\u0433\u0443\u0441\u0442</option>' '<option value="9">\u0421\u0435\u043d\u0442\u044f\u0431\u0440\u044c</option>' '<option value="10">\u041e\u043a\u0442\u044f\u0431\u0440\u044c</option>' '<option value="11">\u041d\u043e\u044f\u0431\u0440\u044c</option>' '<option value="12" selected>\u0414\u0435\u043a\u0430\u0431\u0440\u044c</option>' '</select>' '<select name="mydate_year" id="id_mydate_year">' '<option value="">---</option>' '<option value="2009" selected>2009</option>' '<option value="2010">2010</option>' '<option value="2011">2011</option>' '<option value="2012">2012</option>' '<option value="2013">2013</option>' '<option value="2014">2014</option>' '<option value="2015">2015</option>' '<option value="2016">2016</option>' '<option value="2017">2017</option>' '<option value="2018">2018</option>' '</select>', forms.SelectDateWidget(years=range(2009, 2019)).render('mydate', datetime.date(2009, 12, 31)) ) # English locale with translation.override('en', deactivate=True): self.assertEqual('N j, Y', get_format('DATE_FORMAT')) self.assertEqual(0, get_format('FIRST_DAY_OF_WEEK')) self.assertEqual('.', get_format('DECIMAL_SEPARATOR')) self.assertEqual('Dec. 
31, 2009', date_format(self.d)) self.assertEqual('December 2009', date_format(self.d, 'YEAR_MONTH_FORMAT')) self.assertEqual('12/31/2009 8:50 p.m.', date_format(self.dt, 'SHORT_DATETIME_FORMAT')) self.assertEqual('No localizable', localize('No localizable')) with self.settings(USE_THOUSAND_SEPARATOR=True): self.assertEqual('66,666.666', localize(self.n)) self.assertEqual('99,999.999', localize(self.f)) self.assertEqual('10,000', localize(self.long)) with self.settings(USE_THOUSAND_SEPARATOR=False): self.assertEqual('66666.666', localize(self.n)) self.assertEqual('99999.999', localize(self.f)) self.assertEqual('10000', localize(self.long)) self.assertEqual('Dec. 31, 2009', localize(self.d)) self.assertEqual('Dec. 31, 2009, 8:50 p.m.', localize(self.dt)) with self.settings(USE_THOUSAND_SEPARATOR=True): self.assertEqual('66,666.666', Template('{{ n }}').render(self.ctxt)) self.assertEqual('99,999.999', Template('{{ f }}').render(self.ctxt)) self.assertEqual('10,000', Template('{{ l }}').render(self.ctxt)) with self.settings(USE_THOUSAND_SEPARATOR=False): self.assertEqual('66666.666', Template('{{ n }}').render(self.ctxt)) self.assertEqual('99999.999', Template('{{ f }}').render(self.ctxt)) self.assertEqual('Dec. 31, 2009', Template('{{ d }}').render(self.ctxt)) self.assertEqual('Dec. 31, 2009, 8:50 p.m.', Template('{{ dt }}').render(self.ctxt)) self.assertEqual('66666.67', Template('{{ n|floatformat:2 }}').render(self.ctxt)) self.assertEqual('100000.0', Template('{{ f|floatformat }}').render(self.ctxt)) self.assertEqual('12/31/2009', Template('{{ d|date:"SHORT_DATE_FORMAT" }}').render(self.ctxt)) self.assertEqual( '12/31/2009 8:50 p.m.', Template('{{ dt|date:"SHORT_DATETIME_FORMAT" }}').render(self.ctxt) ) form5 = I18nForm({ 'decimal_field': '66666.666', 'float_field': '99999.999', 'date_field': '12/31/2009', 'datetime_field': '12/31/2009 20:50', 'time_field': '20:50', 'integer_field': '1234', }) self.assertTrue(form5.is_valid()) self.assertEqual(decimal.Decimal('66666.666'), form5.cleaned_data['decimal_field']) self.assertEqual(99999.999, form5.cleaned_data['float_field']) self.assertEqual(datetime.date(2009, 12, 31), form5.cleaned_data['date_field']) self.assertEqual(datetime.datetime(2009, 12, 31, 20, 50), form5.cleaned_data['datetime_field']) self.assertEqual(datetime.time(20, 50), form5.cleaned_data['time_field']) self.assertEqual(1234, form5.cleaned_data['integer_field']) form6 = SelectDateForm({ 'date_field_month': '12', 'date_field_day': '31', 'date_field_year': '2009' }) self.assertTrue(form6.is_valid()) self.assertEqual(datetime.date(2009, 12, 31), form6.cleaned_data['date_field']) self.assertHTMLEqual( '<select name="mydate_month" id="id_mydate_month">' '<option value="">---</option>' '<option value="1">January</option>' '<option value="2">February</option>' '<option value="3">March</option>' '<option value="4">April</option>' '<option value="5">May</option>' '<option value="6">June</option>' '<option value="7">July</option>' '<option value="8">August</option>' '<option value="9">September</option>' '<option value="10">October</option>' '<option value="11">November</option>' '<option value="12" selected>December</option>' '</select>' '<select name="mydate_day" id="id_mydate_day">' '<option value="">---</option>' '<option value="1">1</option>' '<option value="2">2</option>' '<option value="3">3</option>' '<option value="4">4</option>' '<option value="5">5</option>' '<option value="6">6</option>' '<option value="7">7</option>' '<option value="8">8</option>' '<option 
value="9">9</option>' '<option value="10">10</option>' '<option value="11">11</option>' '<option value="12">12</option>' '<option value="13">13</option>' '<option value="14">14</option>' '<option value="15">15</option>' '<option value="16">16</option>' '<option value="17">17</option>' '<option value="18">18</option>' '<option value="19">19</option>' '<option value="20">20</option>' '<option value="21">21</option>' '<option value="22">22</option>' '<option value="23">23</option>' '<option value="24">24</option>' '<option value="25">25</option>' '<option value="26">26</option>' '<option value="27">27</option>' '<option value="28">28</option>' '<option value="29">29</option>' '<option value="30">30</option>' '<option value="31" selected>31</option>' '</select>' '<select name="mydate_year" id="id_mydate_year">' '<option value="">---</option>' '<option value="2009" selected>2009</option>' '<option value="2010">2010</option>' '<option value="2011">2011</option>' '<option value="2012">2012</option>' '<option value="2013">2013</option>' '<option value="2014">2014</option>' '<option value="2015">2015</option>' '<option value="2016">2016</option>' '<option value="2017">2017</option>' '<option value="2018">2018</option>' '</select>', forms.SelectDateWidget(years=range(2009, 2019)).render('mydate', datetime.date(2009, 12, 31)) ) def test_sub_locales(self): """ Check if sublocales fall back to the main locale """ with self.settings(USE_THOUSAND_SEPARATOR=True): with translation.override('de-at', deactivate=True): self.assertEqual('66.666,666', Template('{{ n }}').render(self.ctxt)) with translation.override('es-us', deactivate=True): self.assertEqual('31 de Diciembre de 2009', date_format(self.d)) def test_localized_input(self): """ Tests if form input is correctly localized """ self.maxDiff = 1200 with translation.override('de-at', deactivate=True): form6 = CompanyForm({ 'name': 'acme', 'date_added': datetime.datetime(2009, 12, 31, 6, 0, 0), 'cents_paid': decimal.Decimal('59.47'), 'products_delivered': 12000, }) self.assertTrue(form6.is_valid()) self.assertHTMLEqual( form6.as_ul(), '<li><label for="id_name">Name:</label>' '<input id="id_name" type="text" name="name" value="acme" maxlength="50" required></li>' '<li><label for="id_date_added">Date added:</label>' '<input type="text" name="date_added" value="31.12.2009 06:00:00" id="id_date_added" required></li>' '<li><label for="id_cents_paid">Cents paid:</label>' '<input type="text" name="cents_paid" value="59,47" id="id_cents_paid" required></li>' '<li><label for="id_products_delivered">Products delivered:</label>' '<input type="text" name="products_delivered" value="12000" id="id_products_delivered" required>' '</li>' ) self.assertEqual(localize_input(datetime.datetime(2009, 12, 31, 6, 0, 0)), '31.12.2009 06:00:00') self.assertEqual(datetime.datetime(2009, 12, 31, 6, 0, 0), form6.cleaned_data['date_added']) with self.settings(USE_THOUSAND_SEPARATOR=True): # Checking for the localized "products_delivered" field self.assertInHTML( '<input type="text" name="products_delivered" ' 'value="12.000" id="id_products_delivered" required>', form6.as_ul() ) def test_localized_input_func(self): tests = ( (True, 'True'), (datetime.date(1, 1, 1), '0001-01-01'), (datetime.datetime(1, 1, 1), '0001-01-01 00:00:00'), ) with self.settings(USE_THOUSAND_SEPARATOR=True): for value, expected in tests: with self.subTest(value=value): self.assertEqual(localize_input(value), expected) def test_sanitize_separators(self): """ Tests django.utils.formats.sanitize_separators. 
""" # Non-strings are untouched self.assertEqual(sanitize_separators(123), 123) with translation.override('ru', deactivate=True): # Russian locale has non-breaking space (\xa0) as thousand separator # Usual space is accepted too when sanitizing inputs with self.settings(USE_THOUSAND_SEPARATOR=True): self.assertEqual(sanitize_separators('1\xa0234\xa0567'), '1234567') self.assertEqual(sanitize_separators('77\xa0777,777'), '77777.777') self.assertEqual(sanitize_separators('12 345'), '12345') self.assertEqual(sanitize_separators('77 777,777'), '77777.777') with self.settings(USE_THOUSAND_SEPARATOR=True, USE_L10N=False): self.assertEqual(sanitize_separators('12\xa0345'), '12\xa0345') with self.settings(USE_THOUSAND_SEPARATOR=True): with patch_formats(get_language(), THOUSAND_SEPARATOR='.', DECIMAL_SEPARATOR=','): self.assertEqual(sanitize_separators('10.234'), '10234') # Suspicion that user entered dot as decimal separator (#22171) self.assertEqual(sanitize_separators('10.10'), '10.10') with self.settings(USE_L10N=False, DECIMAL_SEPARATOR=','): self.assertEqual(sanitize_separators('1001,10'), '1001.10') self.assertEqual(sanitize_separators('1001.10'), '1001.10') with self.settings( USE_L10N=False, DECIMAL_SEPARATOR=',', USE_THOUSAND_SEPARATOR=True, THOUSAND_SEPARATOR='.' ): self.assertEqual(sanitize_separators('1.001,10'), '1001.10') self.assertEqual(sanitize_separators('1001,10'), '1001.10') self.assertEqual(sanitize_separators('1001.10'), '1001.10') self.assertEqual(sanitize_separators('1,001.10'), '1.001.10') # Invalid output def test_iter_format_modules(self): """ Tests the iter_format_modules function. """ # Importing some format modules so that we can compare the returned # modules with these expected modules default_mod = import_module('django.conf.locale.de.formats') test_mod = import_module('i18n.other.locale.de.formats') test_mod2 = import_module('i18n.other2.locale.de.formats') with translation.override('de-at', deactivate=True): # Should return the correct default module when no setting is set self.assertEqual(list(iter_format_modules('de')), [default_mod]) # When the setting is a string, should return the given module and # the default module self.assertEqual( list(iter_format_modules('de', 'i18n.other.locale')), [test_mod, default_mod]) # When setting is a list of strings, should return the given # modules and the default module self.assertEqual( list(iter_format_modules('de', ['i18n.other.locale', 'i18n.other2.locale'])), [test_mod, test_mod2, default_mod]) def test_iter_format_modules_stability(self): """ Tests the iter_format_modules function always yields format modules in a stable and correct order in presence of both base ll and ll_CC formats. 
""" en_format_mod = import_module('django.conf.locale.en.formats') en_gb_format_mod = import_module('django.conf.locale.en_GB.formats') self.assertEqual(list(iter_format_modules('en-gb')), [en_gb_format_mod, en_format_mod]) def test_get_format_modules_lang(self): with translation.override('de', deactivate=True): self.assertEqual('.', get_format('DECIMAL_SEPARATOR', lang='en')) def test_get_format_modules_stability(self): with self.settings(FORMAT_MODULE_PATH='i18n.other.locale'): with translation.override('de', deactivate=True): old = "%r" % get_format_modules(reverse=True) new = "%r" % get_format_modules(reverse=True) # second try self.assertEqual(new, old, 'Value returned by get_formats_modules() must be preserved between calls.') def test_localize_templatetag_and_filter(self): """ Test the {% localize %} templatetag and the localize/unlocalize filters. """ context = Context({'int': 1455, 'float': 3.14, 'date': datetime.date(2016, 12, 31)}) template1 = Template( '{% load l10n %}{% localize %}{{ int }}/{{ float }}/{{ date }}{% endlocalize %}; ' '{% localize on %}{{ int }}/{{ float }}/{{ date }}{% endlocalize %}' ) template2 = Template( '{% load l10n %}{{ int }}/{{ float }}/{{ date }}; ' '{% localize off %}{{ int }}/{{ float }}/{{ date }};{% endlocalize %} ' '{{ int }}/{{ float }}/{{ date }}' ) template3 = Template( '{% load l10n %}{{ int }}/{{ float }}/{{ date }}; ' '{{ int|unlocalize }}/{{ float|unlocalize }}/{{ date|unlocalize }}' ) template4 = Template( '{% load l10n %}{{ int }}/{{ float }}/{{ date }}; ' '{{ int|localize }}/{{ float|localize }}/{{ date|localize }}' ) expected_localized = '1.455/3,14/31. Dezember 2016' expected_unlocalized = '1455/3.14/Dez. 31, 2016' output1 = '; '.join([expected_localized, expected_localized]) output2 = '; '.join([expected_localized, expected_unlocalized, expected_localized]) output3 = '; '.join([expected_localized, expected_unlocalized]) output4 = '; '.join([expected_unlocalized, expected_localized]) with translation.override('de', deactivate=True): with self.settings(USE_L10N=False, USE_THOUSAND_SEPARATOR=True): self.assertEqual(template1.render(context), output1) self.assertEqual(template4.render(context), output4) with self.settings(USE_L10N=True, USE_THOUSAND_SEPARATOR=True): self.assertEqual(template1.render(context), output1) self.assertEqual(template2.render(context), output2) self.assertEqual(template3.render(context), output3) def test_localized_as_text_as_hidden_input(self): """ Tests if form input with 'as_hidden' or 'as_text' is correctly localized. 
Ticket #18777 """ self.maxDiff = 1200 with translation.override('de-at', deactivate=True): template = Template('{% load l10n %}{{ form.date_added }}; {{ form.cents_paid }}') template_as_text = Template('{% load l10n %}{{ form.date_added.as_text }}; {{ form.cents_paid.as_text }}') template_as_hidden = Template( '{% load l10n %}{{ form.date_added.as_hidden }}; {{ form.cents_paid.as_hidden }}' ) form = CompanyForm({ 'name': 'acme', 'date_added': datetime.datetime(2009, 12, 31, 6, 0, 0), 'cents_paid': decimal.Decimal('59.47'), 'products_delivered': 12000, }) context = Context({'form': form}) self.assertTrue(form.is_valid()) self.assertHTMLEqual( template.render(context), '<input id="id_date_added" name="date_added" type="text" value="31.12.2009 06:00:00" required>;' '<input id="id_cents_paid" name="cents_paid" type="text" value="59,47" required>' ) self.assertHTMLEqual( template_as_text.render(context), '<input id="id_date_added" name="date_added" type="text" value="31.12.2009 06:00:00" required>;' ' <input id="id_cents_paid" name="cents_paid" type="text" value="59,47" required>' ) self.assertHTMLEqual( template_as_hidden.render(context), '<input id="id_date_added" name="date_added" type="hidden" value="31.12.2009 06:00:00">;' '<input id="id_cents_paid" name="cents_paid" type="hidden" value="59,47">' ) def test_format_arbitrary_settings(self): self.assertEqual(get_format('DEBUG'), 'DEBUG') def test_get_custom_format(self): with self.settings(FORMAT_MODULE_PATH='i18n.other.locale'): with translation.override('fr', deactivate=True): self.assertEqual('d/m/Y CUSTOM', get_format('CUSTOM_DAY_FORMAT')) def test_admin_javascript_supported_input_formats(self): """ The first input format for DATE_INPUT_FORMATS, TIME_INPUT_FORMATS, and DATETIME_INPUT_FORMATS must not contain %f since that's unsupported by the admin's time picker widget. """ regex = re.compile('%([^BcdHImMpSwxXyY%])') for language_code, language_name in settings.LANGUAGES: for format_name in ('DATE_INPUT_FORMATS', 'TIME_INPUT_FORMATS', 'DATETIME_INPUT_FORMATS'): with self.subTest(language=language_code, format=format_name): formatter = get_format(format_name, lang=language_code)[0] self.assertEqual( regex.findall(formatter), [], "%s locale's %s uses an unsupported format code." % (language_code, format_name) ) class MiscTests(SimpleTestCase): rf = RequestFactory() @override_settings(LANGUAGE_CODE='de') def test_english_fallback(self): """ With a non-English LANGUAGE_CODE and if the active language is English or one of its variants, the untranslated string should be returned (instead of falling back to LANGUAGE_CODE) (See #24413). """ self.assertEqual(gettext("Image"), "Bild") with translation.override('en'): self.assertEqual(gettext("Image"), "Image") with translation.override('en-us'): self.assertEqual(gettext("Image"), "Image") with translation.override('en-ca'): self.assertEqual(gettext("Image"), "Image") def test_parse_spec_http_header(self): """ Testing HTTP header parsing. First, we test that we can parse the values according to the spec (and that we extract all the pieces in the right order). 
""" tests = [ # Good headers ('de', [('de', 1.0)]), ('en-AU', [('en-au', 1.0)]), ('es-419', [('es-419', 1.0)]), ('*;q=1.00', [('*', 1.0)]), ('en-AU;q=0.123', [('en-au', 0.123)]), ('en-au;q=0.5', [('en-au', 0.5)]), ('en-au;q=1.0', [('en-au', 1.0)]), ('da, en-gb;q=0.25, en;q=0.5', [('da', 1.0), ('en', 0.5), ('en-gb', 0.25)]), ('en-au-xx', [('en-au-xx', 1.0)]), ('de,en-au;q=0.75,en-us;q=0.5,en;q=0.25,es;q=0.125,fa;q=0.125', [('de', 1.0), ('en-au', 0.75), ('en-us', 0.5), ('en', 0.25), ('es', 0.125), ('fa', 0.125)]), ('*', [('*', 1.0)]), ('de;q=0.', [('de', 0.0)]), ('en; q=1,', [('en', 1.0)]), ('en; q=1.0, * ; q=0.5', [('en', 1.0), ('*', 0.5)]), # Bad headers ('en-gb;q=1.0000', []), ('en;q=0.1234', []), ('en;q=.2', []), ('abcdefghi-au', []), ('**', []), ('en,,gb', []), ('en-au;q=0.1.0', []), (('X' * 97) + 'Z,en', []), ('da, en-gb;q=0.8, en;q=0.7,#', []), ('de;q=2.0', []), ('de;q=0.a', []), ('12-345', []), ('', []), ('en;q=1e0', []), ] for value, expected in tests: with self.subTest(value=value): self.assertEqual(trans_real.parse_accept_lang_header(value), tuple(expected)) def test_parse_literal_http_header(self): """ Now test that we parse a literal HTTP header correctly. """ g = get_language_from_request r = self.rf.get('/') r.COOKIES = {} r.META = {'HTTP_ACCEPT_LANGUAGE': 'pt-br'} self.assertEqual('pt-br', g(r)) r.META = {'HTTP_ACCEPT_LANGUAGE': 'pt'} self.assertEqual('pt', g(r)) r.META = {'HTTP_ACCEPT_LANGUAGE': 'es,de'} self.assertEqual('es', g(r)) r.META = {'HTTP_ACCEPT_LANGUAGE': 'es-ar,de'} self.assertEqual('es-ar', g(r)) # This test assumes there won't be a Django translation to a US # variation of the Spanish language, a safe assumption. When the # user sets it as the preferred language, the main 'es' # translation should be selected instead. r.META = {'HTTP_ACCEPT_LANGUAGE': 'es-us'} self.assertEqual(g(r), 'es') # This tests the following scenario: there isn't a main language (zh) # translation of Django but there is a translation to variation (zh-hans) # the user sets zh-hans as the preferred language, it should be selected # by Django without falling back nor ignoring it. r.META = {'HTTP_ACCEPT_LANGUAGE': 'zh-hans,de'} self.assertEqual(g(r), 'zh-hans') r.META = {'HTTP_ACCEPT_LANGUAGE': 'NL'} self.assertEqual('nl', g(r)) r.META = {'HTTP_ACCEPT_LANGUAGE': 'fy'} self.assertEqual('fy', g(r)) r.META = {'HTTP_ACCEPT_LANGUAGE': 'ia'} self.assertEqual('ia', g(r)) r.META = {'HTTP_ACCEPT_LANGUAGE': 'sr-latn'} self.assertEqual('sr-latn', g(r)) r.META = {'HTTP_ACCEPT_LANGUAGE': 'zh-hans'} self.assertEqual('zh-hans', g(r)) r.META = {'HTTP_ACCEPT_LANGUAGE': 'zh-hant'} self.assertEqual('zh-hant', g(r)) @override_settings( LANGUAGES=[ ('en', 'English'), ('zh-hans', 'Simplified Chinese'), ('zh-hant', 'Traditional Chinese'), ] ) def test_support_for_deprecated_chinese_language_codes(self): """ Some browsers (Firefox, IE, etc.) use deprecated language codes. As these language codes will be removed in Django 1.9, these will be incorrectly matched. For example zh-tw (traditional) will be interpreted as zh-hans (simplified), which is wrong. So we should also accept these deprecated language codes. 
refs #18419 -- this is explicitly for browser compatibility """ g = get_language_from_request r = self.rf.get('/') r.COOKIES = {} r.META = {'HTTP_ACCEPT_LANGUAGE': 'zh-cn,en'} self.assertEqual(g(r), 'zh-hans') r.META = {'HTTP_ACCEPT_LANGUAGE': 'zh-tw,en'} self.assertEqual(g(r), 'zh-hant') def test_special_fallback_language(self): """ Some languages may have special fallbacks that don't follow the simple 'fr-ca' -> 'fr' logic (notably Chinese codes). """ r = self.rf.get('/') r.COOKIES = {} r.META = {'HTTP_ACCEPT_LANGUAGE': 'zh-my,en'} self.assertEqual(get_language_from_request(r), 'zh-hans') def test_parse_language_cookie(self): """ Now test that we parse language preferences stored in a cookie correctly. """ g = get_language_from_request r = self.rf.get('/') r.COOKIES = {settings.LANGUAGE_COOKIE_NAME: 'pt-br'} r.META = {} self.assertEqual('pt-br', g(r)) r.COOKIES = {settings.LANGUAGE_COOKIE_NAME: 'pt'} r.META = {} self.assertEqual('pt', g(r)) r.COOKIES = {settings.LANGUAGE_COOKIE_NAME: 'es'} r.META = {'HTTP_ACCEPT_LANGUAGE': 'de'} self.assertEqual('es', g(r)) # This test assumes there won't be a Django translation to a US # variation of the Spanish language, a safe assumption. When the # user sets it as the preferred language, the main 'es' # translation should be selected instead. r.COOKIES = {settings.LANGUAGE_COOKIE_NAME: 'es-us'} r.META = {} self.assertEqual(g(r), 'es') # This tests the following scenario: there isn't a main language (zh) # translation of Django but there is a translation to variation (zh-hans) # the user sets zh-hans as the preferred language, it should be selected # by Django without falling back nor ignoring it. r.COOKIES = {settings.LANGUAGE_COOKIE_NAME: 'zh-hans'} r.META = {'HTTP_ACCEPT_LANGUAGE': 'de'} self.assertEqual(g(r), 'zh-hans') @override_settings( USE_I18N=True, LANGUAGES=[ ('en', 'English'), ('de', 'German'), ('de-at', 'Austrian German'), ('pt-br', 'Portuguese (Brazil)'), ], ) def test_get_supported_language_variant_real(self): g = trans_real.get_supported_language_variant self.assertEqual(g('en'), 'en') self.assertEqual(g('en-gb'), 'en') self.assertEqual(g('de'), 'de') self.assertEqual(g('de-at'), 'de-at') self.assertEqual(g('de-ch'), 'de') self.assertEqual(g('pt-br'), 'pt-br') self.assertEqual(g('pt'), 'pt-br') self.assertEqual(g('pt-pt'), 'pt-br') with self.assertRaises(LookupError): g('pt', strict=True) with self.assertRaises(LookupError): g('pt-pt', strict=True) with self.assertRaises(LookupError): g('xyz') with self.assertRaises(LookupError): g('xy-zz') def test_get_supported_language_variant_null(self): g = trans_null.get_supported_language_variant self.assertEqual(g(settings.LANGUAGE_CODE), settings.LANGUAGE_CODE) with self.assertRaises(LookupError): g('pt') with self.assertRaises(LookupError): g('de') with self.assertRaises(LookupError): g('de-at') with self.assertRaises(LookupError): g('de', strict=True) with self.assertRaises(LookupError): g('de-at', strict=True) with self.assertRaises(LookupError): g('xyz') @override_settings( LANGUAGES=[ ('en', 'English'), ('de', 'German'), ('de-at', 'Austrian German'), ('pl', 'Polish'), ], ) def test_get_language_from_path_real(self): g = trans_real.get_language_from_path self.assertEqual(g('/pl/'), 'pl') self.assertEqual(g('/pl'), 'pl') self.assertIsNone(g('/xyz/')) self.assertEqual(g('/en/'), 'en') self.assertEqual(g('/en-gb/'), 'en') self.assertEqual(g('/de/'), 'de') self.assertEqual(g('/de-at/'), 'de-at') self.assertEqual(g('/de-ch/'), 'de') self.assertIsNone(g('/de-simple-page/')) def 
test_get_language_from_path_null(self): g = trans_null.get_language_from_path self.assertIsNone(g('/pl/')) self.assertIsNone(g('/pl')) self.assertIsNone(g('/xyz/')) def test_cache_resetting(self): """ After setting LANGUAGE, the cache should be cleared and languages previously valid should not be used (#14170). """ g = get_language_from_request r = self.rf.get('/') r.COOKIES = {} r.META = {'HTTP_ACCEPT_LANGUAGE': 'pt-br'} self.assertEqual('pt-br', g(r)) with self.settings(LANGUAGES=[('en', 'English')]): self.assertNotEqual('pt-br', g(r)) def test_i18n_patterns_returns_list(self): with override_settings(USE_I18N=False): self.assertIsInstance(i18n_patterns([]), list) with override_settings(USE_I18N=True): self.assertIsInstance(i18n_patterns([]), list) class ResolutionOrderI18NTests(SimpleTestCase): def setUp(self): super().setUp() activate('de') def tearDown(self): deactivate() super().tearDown() def assertGettext(self, msgid, msgstr): result = gettext(msgid) self.assertIn( msgstr, result, "The string '%s' isn't in the translation of '%s'; the actual result is '%s'." % (msgstr, msgid, result) ) class AppResolutionOrderI18NTests(ResolutionOrderI18NTests): @override_settings(LANGUAGE_CODE='de') def test_app_translation(self): # Original translation. self.assertGettext('Date/time', 'Datum/Zeit') # Different translation. with self.modify_settings(INSTALLED_APPS={'append': 'i18n.resolution'}): # Force refreshing translations. activate('de') # Doesn't work because it's added later in the list. self.assertGettext('Date/time', 'Datum/Zeit') with self.modify_settings(INSTALLED_APPS={'remove': 'django.contrib.admin.apps.SimpleAdminConfig'}): # Force refreshing translations. activate('de') # Unless the original is removed from the list. self.assertGettext('Date/time', 'Datum/Zeit (APP)') @override_settings(LOCALE_PATHS=extended_locale_paths) class LocalePathsResolutionOrderI18NTests(ResolutionOrderI18NTests): def test_locale_paths_translation(self): self.assertGettext('Time', 'LOCALE_PATHS') def test_locale_paths_override_app_translation(self): with self.settings(INSTALLED_APPS=['i18n.resolution']): self.assertGettext('Time', 'LOCALE_PATHS') class DjangoFallbackResolutionOrderI18NTests(ResolutionOrderI18NTests): def test_django_fallback(self): self.assertEqual(gettext('Date/time'), 'Datum/Zeit') @override_settings(INSTALLED_APPS=['i18n.territorial_fallback']) class TranslationFallbackI18NTests(ResolutionOrderI18NTests): def test_sparse_territory_catalog(self): """ Untranslated strings for territorial language variants use the translations of the generic language. In this case, the de-de translation falls back to de. """ with translation.override('de-de'): self.assertGettext('Test 1 (en)', '(de-de)') self.assertGettext('Test 2 (en)', '(de)') class TestModels(TestCase): def test_lazy(self): tm = TestModel() tm.save() def test_safestr(self): c = Company(cents_paid=12, products_delivered=1) c.name = SafeString('Iñtërnâtiônàlizætiøn1') c.save() class TestLanguageInfo(SimpleTestCase): def test_localized_language_info(self): li = get_language_info('de') self.assertEqual(li['code'], 'de') self.assertEqual(li['name_local'], 'Deutsch') self.assertEqual(li['name'], 'German') self.assertIs(li['bidi'], False) def test_unknown_language_code(self): with self.assertRaisesMessage(KeyError, "Unknown language code xx"): get_language_info('xx') with translation.override('xx'): # A language with no translation catalogs should fallback to the # untranslated string. 
self.assertEqual(gettext("Title"), "Title") def test_unknown_only_country_code(self): li = get_language_info('de-xx') self.assertEqual(li['code'], 'de') self.assertEqual(li['name_local'], 'Deutsch') self.assertEqual(li['name'], 'German') self.assertIs(li['bidi'], False) def test_unknown_language_code_and_country_code(self): with self.assertRaisesMessage(KeyError, "Unknown language code xx-xx and xx"): get_language_info('xx-xx') def test_fallback_language_code(self): """ get_language_info return the first fallback language info if the lang_info struct does not contain the 'name' key. """ li = get_language_info('zh-my') self.assertEqual(li['code'], 'zh-hans') li = get_language_info('zh-hans') self.assertEqual(li['code'], 'zh-hans') @override_settings( USE_I18N=True, LANGUAGES=[ ('en', 'English'), ('fr', 'French'), ], MIDDLEWARE=[ 'django.middleware.locale.LocaleMiddleware', 'django.middleware.common.CommonMiddleware', ], ROOT_URLCONF='i18n.urls', ) class LocaleMiddlewareTests(TestCase): def test_streaming_response(self): # Regression test for #5241 response = self.client.get('/fr/streaming/') self.assertContains(response, "Oui/Non") response = self.client.get('/en/streaming/') self.assertContains(response, "Yes/No") @override_settings( MIDDLEWARE=[ 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.locale.LocaleMiddleware', 'django.middleware.common.CommonMiddleware', ], ) def test_language_not_saved_to_session(self): """ The Current language isno' automatically saved to the session on every request (#21473). """ self.client.get('/fr/simple/') self.assertNotIn(LANGUAGE_SESSION_KEY, self.client.session) @override_settings( USE_I18N=True, LANGUAGES=[ ('en', 'English'), ('de', 'German'), ('fr', 'French'), ], MIDDLEWARE=[ 'django.middleware.locale.LocaleMiddleware', 'django.middleware.common.CommonMiddleware', ], ROOT_URLCONF='i18n.urls_default_unprefixed', LANGUAGE_CODE='en', ) class UnprefixedDefaultLanguageTests(SimpleTestCase): def test_default_lang_without_prefix(self): """ With i18n_patterns(..., prefix_default_language=False), the default language (settings.LANGUAGE_CODE) should be accessible without a prefix. """ response = self.client.get('/simple/') self.assertEqual(response.content, b'Yes') def test_other_lang_with_prefix(self): response = self.client.get('/fr/simple/') self.assertEqual(response.content, b'Oui') def test_unprefixed_language_other_than_accept_language(self): response = self.client.get('/simple/', HTTP_ACCEPT_LANGUAGE='fr') self.assertEqual(response.content, b'Yes') def test_page_with_dash(self): # A page starting with /de* shouldn't match the 'de' language code. response = self.client.get('/de-simple-page/') self.assertEqual(response.content, b'Yes') def test_no_redirect_on_404(self): """ A request for a nonexistent URL shouldn't cause a redirect to /<defaut_language>/<request_url> when prefix_default_language=False and /<default_language>/<request_url> has a URL match (#27402). """ # A match for /group1/group2/ must exist for this to act as a # regression test. 
response = self.client.get('/group1/group2/') self.assertEqual(response.status_code, 200) response = self.client.get('/nonexistent/') self.assertEqual(response.status_code, 404) @override_settings( USE_I18N=True, LANGUAGES=[ ('bg', 'Bulgarian'), ('en-us', 'English'), ('pt-br', 'Portuguese (Brazil)'), ], MIDDLEWARE=[ 'django.middleware.locale.LocaleMiddleware', 'django.middleware.common.CommonMiddleware', ], ROOT_URLCONF='i18n.urls' ) class CountrySpecificLanguageTests(SimpleTestCase): rf = RequestFactory() def test_check_for_language(self): self.assertTrue(check_for_language('en')) self.assertTrue(check_for_language('en-us')) self.assertTrue(check_for_language('en-US')) self.assertFalse(check_for_language('en_US')) self.assertTrue(check_for_language('be')) self.assertTrue(check_for_language('be@latin')) self.assertTrue(check_for_language('sr-RS@latin')) self.assertTrue(check_for_language('sr-RS@12345')) self.assertFalse(check_for_language('en-ü')) self.assertFalse(check_for_language('en\x00')) self.assertFalse(check_for_language(None)) self.assertFalse(check_for_language('be@ ')) # Specifying encoding is not supported (Django enforces UTF-8) self.assertFalse(check_for_language('tr-TR.UTF-8')) self.assertFalse(check_for_language('tr-TR.UTF8')) self.assertFalse(check_for_language('de-DE.utf-8')) def test_check_for_language_null(self): self.assertIs(trans_null.check_for_language('en'), True) def test_get_language_from_request(self): # issue 19919 r = self.rf.get('/') r.COOKIES = {} r.META = {'HTTP_ACCEPT_LANGUAGE': 'en-US,en;q=0.8,bg;q=0.6,ru;q=0.4'} lang = get_language_from_request(r) self.assertEqual('en-us', lang) r = self.rf.get('/') r.COOKIES = {} r.META = {'HTTP_ACCEPT_LANGUAGE': 'bg-bg,en-US;q=0.8,en;q=0.6,ru;q=0.4'} lang = get_language_from_request(r) self.assertEqual('bg', lang) def test_get_language_from_request_null(self): lang = trans_null.get_language_from_request(None) self.assertEqual(lang, 'en') with override_settings(LANGUAGE_CODE='de'): lang = trans_null.get_language_from_request(None) self.assertEqual(lang, 'de') def test_specific_language_codes(self): # issue 11915 r = self.rf.get('/') r.COOKIES = {} r.META = {'HTTP_ACCEPT_LANGUAGE': 'pt,en-US;q=0.8,en;q=0.6,ru;q=0.4'} lang = get_language_from_request(r) self.assertEqual('pt-br', lang) r = self.rf.get('/') r.COOKIES = {} r.META = {'HTTP_ACCEPT_LANGUAGE': 'pt-pt,en-US;q=0.8,en;q=0.6,ru;q=0.4'} lang = get_language_from_request(r) self.assertEqual('pt-br', lang) class TranslationFilesMissing(SimpleTestCase): def setUp(self): super().setUp() self.gettext_find_builtin = gettext_module.find def tearDown(self): gettext_module.find = self.gettext_find_builtin super().tearDown() def patchGettextFind(self): gettext_module.find = lambda *args, **kw: None def test_failure_finding_default_mo_files(self): """OSError is raised if the default language is unparseable.""" self.patchGettextFind() trans_real._translations = {} with self.assertRaises(OSError): activate('en') class NonDjangoLanguageTests(SimpleTestCase): """ A language not present in Django's default languages can still be installed and used by a Django project.
""" @override_settings( USE_I18N=True, LANGUAGES=[ ('en-us', 'English'), ('xxx', 'Somelanguage'), ], LANGUAGE_CODE='xxx', LOCALE_PATHS=[os.path.join(here, 'commands', 'locale')], ) def test_non_django_language(self): self.assertEqual(get_language(), 'xxx') self.assertEqual(gettext("year"), "reay") @override_settings(USE_I18N=True) def test_check_for_language(self): with tempfile.TemporaryDirectory() as app_dir: os.makedirs(os.path.join(app_dir, 'locale', 'dummy_Lang', 'LC_MESSAGES')) open(os.path.join(app_dir, 'locale', 'dummy_Lang', 'LC_MESSAGES', 'django.mo'), 'w').close() app_config = AppConfig('dummy_app', AppModuleStub(__path__=[app_dir])) with mock.patch('django.apps.apps.get_app_configs', return_value=[app_config]): self.assertIs(check_for_language('dummy-lang'), True) @override_settings( USE_I18N=True, LANGUAGES=[ ('en-us', 'English'), # xyz language has no locale files ('xyz', 'XYZ'), ], ) @translation.override('xyz') def test_plural_non_django_language(self): self.assertEqual(get_language(), 'xyz') self.assertEqual(ngettext('year', 'years', 2), 'years') @override_settings(USE_I18N=True) class WatchForTranslationChangesTests(SimpleTestCase): @override_settings(USE_I18N=False) def test_i18n_disabled(self): mocked_sender = mock.MagicMock() watch_for_translation_changes(mocked_sender) mocked_sender.watch_dir.assert_not_called() def test_i18n_enabled(self): mocked_sender = mock.MagicMock() watch_for_translation_changes(mocked_sender) self.assertGreater(mocked_sender.watch_dir.call_count, 1) def test_i18n_locale_paths(self): mocked_sender = mock.MagicMock() with tempfile.TemporaryDirectory() as app_dir: with self.settings(LOCALE_PATHS=[app_dir]): watch_for_translation_changes(mocked_sender) mocked_sender.watch_dir.assert_any_call(Path(app_dir), '**/*.mo') def test_i18n_app_dirs(self): mocked_sender = mock.MagicMock() with self.settings(INSTALLED_APPS=['tests.i18n.sampleproject']): watch_for_translation_changes(mocked_sender) project_dir = Path(__file__).parent / 'sampleproject' / 'locale' mocked_sender.watch_dir.assert_any_call(project_dir, '**/*.mo') def test_i18n_local_locale(self): mocked_sender = mock.MagicMock() watch_for_translation_changes(mocked_sender) locale_dir = Path(__file__).parent / 'locale' mocked_sender.watch_dir.assert_any_call(locale_dir, '**/*.mo') class TranslationFileChangedTests(SimpleTestCase): def setUp(self): self.gettext_translations = gettext_module._translations.copy() self.trans_real_translations = trans_real._translations.copy() def tearDown(self): gettext._translations = self.gettext_translations trans_real._translations = self.trans_real_translations def test_ignores_non_mo_files(self): gettext_module._translations = {'foo': 'bar'} path = Path('test.py') self.assertIsNone(translation_file_changed(None, path)) self.assertEqual(gettext_module._translations, {'foo': 'bar'}) def test_resets_cache_with_mo_files(self): gettext_module._translations = {'foo': 'bar'} trans_real._translations = {'foo': 'bar'} trans_real._default = 1 trans_real._active = False path = Path('test.mo') self.assertIs(translation_file_changed(None, path), True) self.assertEqual(gettext_module._translations, {}) self.assertEqual(trans_real._translations, {}) self.assertIsNone(trans_real._default) self.assertIsInstance(trans_real._active, Local) class UtilsTests(SimpleTestCase): def test_round_away_from_one(self): tests = [ (0, 0), (0., 0), (0.25, 0), (0.5, 0), (0.75, 0), (1, 1), (1., 1), (1.25, 2), (1.5, 2), (1.75, 2), (-0., 0), (-0.25, -1), (-0.5, -1), (-0.75, -1), (-1, -1), (-1., -1), 
(-1.25, -2), (-1.5, -2), (-1.75, -2), ] for value, expected in tests: with self.subTest(value=value): self.assertEqual(round_away_from_one(value), expected)
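# A minimal sketch of the rounding rule the table above exercises (an
# assumption about the implementation of round_away_from_one, not a verbatim
# copy of it): values below 1 are floored and values at or above 1 are
# ceiled, i.e. rounded "away from one".
#
#     import math
#
#     def round_away_from_one(value):
#         return int(math.floor(value)) if value < 1 else int(math.ceil(value))
#
# For example, round_away_from_one(1.25) == 2 and round_away_from_one(-0.25)
# == -1, matching the expected values in the test table.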
0aaa6b867ce928e456106f9a0a6e70788bf2eb802d5a8fc5f8a2c1812a7446cf
"""HTML utilities suitable for global use.""" import html import json import re from html.parser import HTMLParser from urllib.parse import ( parse_qsl, quote, unquote, urlencode, urlsplit, urlunsplit, ) from django.utils.functional import Promise, keep_lazy, keep_lazy_text from django.utils.http import RFC3986_GENDELIMS, RFC3986_SUBDELIMS from django.utils.safestring import SafeData, SafeString, mark_safe from django.utils.text import normalize_newlines # Configuration for urlize() function. TRAILING_PUNCTUATION_CHARS = '.,:;!' WRAPPING_PUNCTUATION = [('(', ')'), ('[', ']')] # List of possible strings used for bullets in bulleted lists. DOTS = ['&middot;', '*', '\u2022', '&#149;', '&bull;', '&#8226;'] unencoded_ampersands_re = re.compile(r'&(?!(\w+|#\d+);)') word_split_re = re.compile(r'''([\s<>"']+)''') simple_url_re = re.compile(r'^https?://\[?\w', re.IGNORECASE) simple_url_2_re = re.compile(r'^www\.|^(?!http)\w[^@]+\.(com|edu|gov|int|mil|net|org)($|/.*)$', re.IGNORECASE) @keep_lazy(str, SafeString) def escape(text): """ Return the given text with ampersands, quotes and angle brackets encoded for use in HTML. Always escape input, even if it's already escaped and marked as such. This may result in double-escaping. If this is a concern, use conditional_escape() instead. """ return mark_safe(html.escape(str(text))) _js_escapes = { ord('\\'): '\\u005C', ord('\''): '\\u0027', ord('"'): '\\u0022', ord('>'): '\\u003E', ord('<'): '\\u003C', ord('&'): '\\u0026', ord('='): '\\u003D', ord('-'): '\\u002D', ord(';'): '\\u003B', ord('`'): '\\u0060', ord('\u2028'): '\\u2028', ord('\u2029'): '\\u2029' } # Escape every ASCII character with a value less than 32. _js_escapes.update((ord('%c' % z), '\\u%04X' % z) for z in range(32)) @keep_lazy(str, SafeString) def escapejs(value): """Hex encode characters for use in JavaScript strings.""" return mark_safe(str(value).translate(_js_escapes)) _json_script_escapes = { ord('>'): '\\u003E', ord('<'): '\\u003C', ord('&'): '\\u0026', } def json_script(value, element_id): """ Escape all the HTML/XML special characters with their unicode escapes, so value is safe to be output anywhere except for inside a tag attribute. Wrap the escaped JSON in a script tag. """ from django.core.serializers.json import DjangoJSONEncoder json_str = json.dumps(value, cls=DjangoJSONEncoder).translate(_json_script_escapes) return format_html( '<script id="{}" type="application/json">{}</script>', element_id, mark_safe(json_str) ) def conditional_escape(text): """ Similar to escape(), except that it doesn't operate on pre-escaped strings. This function relies on the __html__ convention used both by Django's SafeData class and by third-party libraries like markupsafe. """ if isinstance(text, Promise): text = str(text) if hasattr(text, '__html__'): return text.__html__() else: return escape(text) def format_html(format_string, *args, **kwargs): """ Similar to str.format, but pass all arguments through conditional_escape(), and call mark_safe() on the result. This function should be used instead of str.format or % interpolation to build up small HTML fragments. """ args_safe = map(conditional_escape, args) kwargs_safe = {k: conditional_escape(v) for (k, v) in kwargs.items()} return mark_safe(format_string.format(*args_safe, **kwargs_safe)) def format_html_join(sep, format_string, args_generator): """ A wrapper of format_html, for the common case of a group of arguments that need to be formatted using the same format string, and then joined using 'sep'. 
'sep' is also passed through conditional_escape. 'args_generator' should be an iterator that returns the sequence of 'args' that will be passed to format_html. Example: format_html_join('\n', "<li>{} {}</li>", ((u.first_name, u.last_name) for u in users)) """ return mark_safe(conditional_escape(sep).join( format_html(format_string, *args) for args in args_generator )) @keep_lazy_text def linebreaks(value, autoescape=False): """Convert newlines into <p> and <br>s.""" value = normalize_newlines(value) paras = re.split('\n{2,}', str(value)) if autoescape: paras = ['<p>%s</p>' % escape(p).replace('\n', '<br>') for p in paras] else: paras = ['<p>%s</p>' % p.replace('\n', '<br>') for p in paras] return '\n\n'.join(paras) class MLStripper(HTMLParser): def __init__(self): super().__init__(convert_charrefs=False) self.reset() self.fed = [] def handle_data(self, d): self.fed.append(d) def handle_entityref(self, name): self.fed.append('&%s;' % name) def handle_charref(self, name): self.fed.append('&#%s;' % name) def get_data(self): return ''.join(self.fed) def _strip_once(value): """ Internal tag stripping utility used by strip_tags. """ s = MLStripper() s.feed(value) s.close() return s.get_data() @keep_lazy_text def strip_tags(value): """Return the given HTML with all tags stripped.""" # Note: in typical case this loop executes _strip_once once. Loop condition # is redundant, but helps to reduce number of executions of _strip_once. value = str(value) while '<' in value and '>' in value: new_value = _strip_once(value) if len(new_value) >= len(value): # _strip_once was not able to detect more tags break value = new_value return value @keep_lazy_text def strip_spaces_between_tags(value): """Return the given HTML with spaces between tags removed.""" return re.sub(r'>\s+<', '><', str(value)) def smart_urlquote(url): """Quote a URL if it isn't already quoted.""" def unquote_quote(segment): segment = unquote(segment) # Tilde is part of RFC3986 Unreserved Characters # https://tools.ietf.org/html/rfc3986#section-2.3 # See also https://bugs.python.org/issue16285 return quote(segment, safe=RFC3986_SUBDELIMS + RFC3986_GENDELIMS + '~') # Handle IDN before quoting. try: scheme, netloc, path, query, fragment = urlsplit(url) except ValueError: # invalid IPv6 URL (normally square brackets in hostname part). return unquote_quote(url) try: netloc = netloc.encode('idna').decode('ascii') # IDN -> ACE except UnicodeError: # invalid domain part return unquote_quote(url) if query: # Separately unquoting key/value, so as to not mix querystring separators # included in query values. See #22267. query_parts = [(unquote(q[0]), unquote(q[1])) for q in parse_qsl(query, keep_blank_values=True)] # urlencode will take care of quoting query = urlencode(query_parts) path = unquote_quote(path) fragment = unquote_quote(fragment) return urlunsplit((scheme, netloc, path, query, fragment)) @keep_lazy_text def urlize(text, trim_url_limit=None, nofollow=False, autoescape=False): """ Convert any URLs in text into clickable links. Works on http://, https://, www. links, and also on links ending in one of the original seven gTLDs (.com, .edu, .gov, .int, .mil, .net, and .org). Links can have trailing punctuation (periods, commas, close-parens) and leading punctuation (opening parens) and it'll still do the right thing. If trim_url_limit is not None, truncate the URLs in the link text longer than this limit to trim_url_limit - 1 characters and append an ellipsis. If nofollow is True, give the links a rel="nofollow" attribute. 
If autoescape is True, autoescape the link text and URLs. """ safe_input = isinstance(text, SafeData) def trim_url(x, limit=trim_url_limit): if limit is None or len(x) <= limit: return x return '%s…' % x[:max(0, limit - 1)] def trim_punctuation(lead, middle, trail): """ Trim trailing and wrapping punctuation from `middle`. Return the items of the new state. """ # Continue trimming until middle remains unchanged. trimmed_something = True while trimmed_something: trimmed_something = False # Trim wrapping punctuation. for opening, closing in WRAPPING_PUNCTUATION: if middle.startswith(opening): middle = middle[len(opening):] lead += opening trimmed_something = True # Keep parentheses at the end only if they're balanced. if (middle.endswith(closing) and middle.count(closing) == middle.count(opening) + 1): middle = middle[:-len(closing)] trail = closing + trail trimmed_something = True # Trim trailing punctuation (after trimming wrapping punctuation, # as encoded entities contain ';'). Unescape entities to avoid # breaking them by removing ';'. middle_unescaped = html.unescape(middle) stripped = middle_unescaped.rstrip(TRAILING_PUNCTUATION_CHARS) if middle_unescaped != stripped: trail = middle[len(stripped):] + trail middle = middle[:len(stripped) - len(middle_unescaped)] trimmed_something = True return lead, middle, trail def is_email_simple(value): """Return True if value looks like an email address.""" # An @ must be in the middle of the value. if '@' not in value or value.startswith('@') or value.endswith('@'): return False try: p1, p2 = value.split('@') except ValueError: # value contains more than one @. return False # Dot must be in p2 (e.g. example.com) if '.' not in p2 or p2.startswith('.'): return False return True words = word_split_re.split(str(text)) for i, word in enumerate(words): if '.' in word or '@' in word or ':' in word: # lead: Current punctuation trimmed from the beginning of the word. # middle: Current state of the word. # trail: Current punctuation trimmed from the end of the word. lead, middle, trail = '', word, '' # Deal with punctuation. lead, middle, trail = trim_punctuation(lead, middle, trail) # Make URL we want to point to. url = None nofollow_attr = ' rel="nofollow"' if nofollow else '' if simple_url_re.match(middle): url = smart_urlquote(html.unescape(middle)) elif simple_url_2_re.match(middle): url = smart_urlquote('http://%s' % html.unescape(middle)) elif ':' not in middle and is_email_simple(middle): local, domain = middle.rsplit('@', 1) try: domain = domain.encode('idna').decode('ascii') except UnicodeError: continue url = 'mailto:%s@%s' % (local, domain) nofollow_attr = '' # Make link. if url: trimmed = trim_url(middle) if autoescape and not safe_input: lead, trail = escape(lead), escape(trail) trimmed = escape(trimmed) middle = '<a href="%s"%s>%s</a>' % (escape(url), nofollow_attr, trimmed) words[i] = mark_safe('%s%s%s' % (lead, middle, trail)) else: if safe_input: words[i] = mark_safe(word) elif autoescape: words[i] = escape(word) elif safe_input: words[i] = mark_safe(word) elif autoescape: words[i] = escape(word) return ''.join(words) def avoid_wrapping(value): """ Avoid text wrapping in the middle of a phrase by adding non-breaking spaces where there previously were normal spaces. """ return value.replace(" ", "\xa0") def html_safe(klass): """ A decorator that defines the __html__ method. This helps non-Django templates to detect classes whose __str__ methods return SafeString. 
""" if '__html__' in klass.__dict__: raise ValueError( "can't apply @html_safe to %s because it defines " "__html__()." % klass.__name__ ) if '__str__' not in klass.__dict__: raise ValueError( "can't apply @html_safe to %s because it doesn't " "define __str__()." % klass.__name__ ) klass_str = klass.__str__ klass.__str__ = lambda self: mark_safe(klass_str(self)) klass.__html__ = lambda self: str(self) return klass
59a6572762a18f3f9d8977a74601f2e4a12830c3d8d8769f5d754cf30ae7d838
import ipaddress import re from pathlib import Path from urllib.parse import urlsplit, urlunsplit from django.core.exceptions import ValidationError from django.utils.deconstruct import deconstructible from django.utils.functional import SimpleLazyObject from django.utils.ipv6 import is_valid_ipv6_address from django.utils.translation import gettext_lazy as _, ngettext_lazy # These values, if given to validate(), will trigger the self.required check. EMPTY_VALUES = (None, '', [], (), {}) def _lazy_re_compile(regex, flags=0): """Lazily compile a regex with flags.""" def _compile(): # Compile the regex if it was not passed pre-compiled. if isinstance(regex, str): return re.compile(regex, flags) else: assert not flags, "flags must be empty if regex is passed pre-compiled" return regex return SimpleLazyObject(_compile) @deconstructible class RegexValidator: regex = '' message = _('Enter a valid value.') code = 'invalid' inverse_match = False flags = 0 def __init__(self, regex=None, message=None, code=None, inverse_match=None, flags=None): if regex is not None: self.regex = regex if message is not None: self.message = message if code is not None: self.code = code if inverse_match is not None: self.inverse_match = inverse_match if flags is not None: self.flags = flags if self.flags and not isinstance(self.regex, str): raise TypeError("If the flags are set, regex must be a regular expression string.") self.regex = _lazy_re_compile(self.regex, self.flags) def __call__(self, value): """ Validate that the input contains (or does *not* contain, if inverse_match is True) a match for the regular expression. """ regex_matches = self.regex.search(str(value)) invalid_input = regex_matches if self.inverse_match else not regex_matches if invalid_input: raise ValidationError(self.message, code=self.code) def __eq__(self, other): return ( isinstance(other, RegexValidator) and self.regex.pattern == other.regex.pattern and self.regex.flags == other.regex.flags and (self.message == other.message) and (self.code == other.code) and (self.inverse_match == other.inverse_match) ) @deconstructible class URLValidator(RegexValidator): ul = '\u00a1-\uffff' # unicode letters range (must not be a raw string) # IP patterns ipv4_re = r'(?:25[0-5]|2[0-4]\d|[0-1]?\d?\d)(?:\.(?:25[0-5]|2[0-4]\d|[0-1]?\d?\d)){3}' ipv6_re = r'\[[0-9a-f:\.]+\]' # (simple regex, validated later) # Host patterns hostname_re = r'[a-z' + ul + r'0-9](?:[a-z' + ul + r'0-9-]{0,61}[a-z' + ul + r'0-9])?' # Max length for domain name labels is 63 characters per RFC 1034 sec. 3.1 domain_re = r'(?:\.(?!-)[a-z' + ul + r'0-9-]{1,63}(?<!-))*' tld_re = ( r'\.' # dot r'(?!-)' # can't start with a dash r'(?:[a-z' + ul + '-]{2,63}' # domain label r'|xn--[a-z0-9]{1,59})' # or punycode label r'(?<!-)' # can't end with a dash r'\.?' # may have a trailing dot ) host_re = '(' + hostname_re + domain_re + tld_re + '|localhost)' regex = _lazy_re_compile( r'^(?:[a-z0-9\.\-\+]*)://' # scheme is validated separately r'(?:[^\s:@/]+(?::[^\s:@/]*)?@)?' # user:pass authentication r'(?:' + ipv4_re + '|' + ipv6_re + '|' + host_re + ')' r'(?::\d{2,5})?' # port r'(?:[/?#][^\s]*)?' 
# resource path r'\Z', re.IGNORECASE) message = _('Enter a valid URL.') schemes = ['http', 'https', 'ftp', 'ftps'] def __init__(self, schemes=None, **kwargs): super().__init__(**kwargs) if schemes is not None: self.schemes = schemes def __call__(self, value): # Check first if the scheme is valid scheme = value.split('://')[0].lower() if scheme not in self.schemes: raise ValidationError(self.message, code=self.code) # Then check full URL try: super().__call__(value) except ValidationError as e: # Trivial case failed. Try for possible IDN domain if value: try: scheme, netloc, path, query, fragment = urlsplit(value) except ValueError: # for example, "Invalid IPv6 URL" raise ValidationError(self.message, code=self.code) try: netloc = netloc.encode('idna').decode('ascii') # IDN -> ACE except UnicodeError: # invalid domain part raise e url = urlunsplit((scheme, netloc, path, query, fragment)) super().__call__(url) else: raise else: # Now verify IPv6 in the netloc part host_match = re.search(r'^\[(.+)\](?::\d{2,5})?$', urlsplit(value).netloc) if host_match: potential_ip = host_match.groups()[0] try: validate_ipv6_address(potential_ip) except ValidationError: raise ValidationError(self.message, code=self.code) # The maximum length of a full host name is 253 characters per RFC 1034 # section 3.1. It's defined to be 255 bytes or less, but this includes # one byte for the length of the name and one byte for the trailing dot # that's used to indicate absolute names in DNS. if len(urlsplit(value).netloc) > 253: raise ValidationError(self.message, code=self.code) integer_validator = RegexValidator( _lazy_re_compile(r'^-?\d+\Z'), message=_('Enter a valid integer.'), code='invalid', ) def validate_integer(value): return integer_validator(value) @deconstructible class EmailValidator: message = _('Enter a valid email address.') code = 'invalid' user_regex = _lazy_re_compile( r"(^[-!#$%&'*+/=?^_`{}|~0-9A-Z]+(\.[-!#$%&'*+/=?^_`{}|~0-9A-Z]+)*\Z" # dot-atom r'|^"([\001-\010\013\014\016-\037!#-\[\]-\177]|\\[\001-\011\013\014\016-\177])*"\Z)', # quoted-string re.IGNORECASE) domain_regex = _lazy_re_compile( # max length for domain name labels is 63 characters per RFC 1034 r'((?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+)(?:[A-Z0-9-]{2,63}(?<!-))\Z', re.IGNORECASE) literal_regex = _lazy_re_compile( # literal form, ipv4 or ipv6 address (SMTP 4.1.3) r'\[([A-f0-9:\.]+)\]\Z', re.IGNORECASE) domain_whitelist = ['localhost'] def __init__(self, message=None, code=None, whitelist=None): if message is not None: self.message = message if code is not None: self.code = code if whitelist is not None: self.domain_whitelist = whitelist def __call__(self, value): if not value or '@' not in value: raise ValidationError(self.message, code=self.code) user_part, domain_part = value.rsplit('@', 1) if not self.user_regex.match(user_part): raise ValidationError(self.message, code=self.code) if (domain_part not in self.domain_whitelist and not self.validate_domain_part(domain_part)): # Try for possible IDN domain-part try: domain_part = domain_part.encode('idna').decode('ascii') except UnicodeError: pass else: if self.validate_domain_part(domain_part): return raise ValidationError(self.message, code=self.code) def validate_domain_part(self, domain_part): if self.domain_regex.match(domain_part): return True literal_match = self.literal_regex.match(domain_part) if literal_match: ip_address = literal_match.group(1) try: validate_ipv46_address(ip_address) return True except ValidationError: pass return False def __eq__(self, other): return ( 
isinstance(other, EmailValidator) and (self.domain_whitelist == other.domain_whitelist) and (self.message == other.message) and (self.code == other.code) ) validate_email = EmailValidator() slug_re = _lazy_re_compile(r'^[-a-zA-Z0-9_]+\Z') validate_slug = RegexValidator( slug_re, # Translators: "letters" means latin letters: a-z and A-Z. _('Enter a valid “slug” consisting of letters, numbers, underscores or hyphens.'), 'invalid' ) slug_unicode_re = _lazy_re_compile(r'^[-\w]+\Z') validate_unicode_slug = RegexValidator( slug_unicode_re, _('Enter a valid “slug” consisting of Unicode letters, numbers, underscores, or hyphens.'), 'invalid' ) def validate_ipv4_address(value): try: ipaddress.IPv4Address(value) except ValueError: raise ValidationError(_('Enter a valid IPv4 address.'), code='invalid') def validate_ipv6_address(value): if not is_valid_ipv6_address(value): raise ValidationError(_('Enter a valid IPv6 address.'), code='invalid') def validate_ipv46_address(value): try: validate_ipv4_address(value) except ValidationError: try: validate_ipv6_address(value) except ValidationError: raise ValidationError(_('Enter a valid IPv4 or IPv6 address.'), code='invalid') ip_address_validator_map = { 'both': ([validate_ipv46_address], _('Enter a valid IPv4 or IPv6 address.')), 'ipv4': ([validate_ipv4_address], _('Enter a valid IPv4 address.')), 'ipv6': ([validate_ipv6_address], _('Enter a valid IPv6 address.')), } def ip_address_validators(protocol, unpack_ipv4): """ Depending on the given parameters, return the appropriate validators for the GenericIPAddressField. """ if protocol != 'both' and unpack_ipv4: raise ValueError( "You can only use `unpack_ipv4` if `protocol` is set to 'both'") try: return ip_address_validator_map[protocol.lower()] except KeyError: raise ValueError("The protocol '%s' is unknown. Supported: %s" % (protocol, list(ip_address_validator_map))) def int_list_validator(sep=',', message=None, code='invalid', allow_negative=False): regexp = _lazy_re_compile(r'^%(neg)s\d+(?:%(sep)s%(neg)s\d+)*\Z' % { 'neg': '(-)?' 
if allow_negative else '', 'sep': re.escape(sep), }) return RegexValidator(regexp, message=message, code=code) validate_comma_separated_integer_list = int_list_validator( message=_('Enter only digits separated by commas.'), ) @deconstructible class BaseValidator: message = _('Ensure this value is %(limit_value)s (it is %(show_value)s).') code = 'limit_value' def __init__(self, limit_value, message=None): self.limit_value = limit_value if message: self.message = message def __call__(self, value): cleaned = self.clean(value) limit_value = self.limit_value() if callable(self.limit_value) else self.limit_value params = {'limit_value': limit_value, 'show_value': cleaned, 'value': value} if self.compare(cleaned, limit_value): raise ValidationError(self.message, code=self.code, params=params) def __eq__(self, other): return ( isinstance(other, self.__class__) and self.limit_value == other.limit_value and self.message == other.message and self.code == other.code ) def compare(self, a, b): return a is not b def clean(self, x): return x @deconstructible class MaxValueValidator(BaseValidator): message = _('Ensure this value is less than or equal to %(limit_value)s.') code = 'max_value' def compare(self, a, b): return a > b @deconstructible class MinValueValidator(BaseValidator): message = _('Ensure this value is greater than or equal to %(limit_value)s.') code = 'min_value' def compare(self, a, b): return a < b @deconstructible class MinLengthValidator(BaseValidator): message = ngettext_lazy( 'Ensure this value has at least %(limit_value)d character (it has %(show_value)d).', 'Ensure this value has at least %(limit_value)d characters (it has %(show_value)d).', 'limit_value') code = 'min_length' def compare(self, a, b): return a < b def clean(self, x): return len(x) @deconstructible class MaxLengthValidator(BaseValidator): message = ngettext_lazy( 'Ensure this value has at most %(limit_value)d character (it has %(show_value)d).', 'Ensure this value has at most %(limit_value)d characters (it has %(show_value)d).', 'limit_value') code = 'max_length' def compare(self, a, b): return a > b def clean(self, x): return len(x) @deconstructible class DecimalValidator: """ Validate that the input does not exceed the maximum number of digits expected, otherwise raise ValidationError. """ messages = { 'invalid': _('Enter a number.'), 'max_digits': ngettext_lazy( 'Ensure that there are no more than %(max)s digit in total.', 'Ensure that there are no more than %(max)s digits in total.', 'max' ), 'max_decimal_places': ngettext_lazy( 'Ensure that there are no more than %(max)s decimal place.', 'Ensure that there are no more than %(max)s decimal places.', 'max' ), 'max_whole_digits': ngettext_lazy( 'Ensure that there are no more than %(max)s digit before the decimal point.', 'Ensure that there are no more than %(max)s digits before the decimal point.', 'max' ), } def __init__(self, max_digits, decimal_places): self.max_digits = max_digits self.decimal_places = decimal_places def __call__(self, value): digit_tuple, exponent = value.as_tuple()[1:] if exponent in {'F', 'n', 'N'}: raise ValidationError(self.messages['invalid']) if exponent >= 0: # A positive exponent adds that many trailing zeros. 
digits = len(digit_tuple) + exponent decimals = 0 else: # If the absolute value of the negative exponent is larger than the # number of digits, then it's the same as the number of digits, # because it'll consume all of the digits in digit_tuple and then # add abs(exponent) - len(digit_tuple) leading zeros after the # decimal point. if abs(exponent) > len(digit_tuple): digits = decimals = abs(exponent) else: digits = len(digit_tuple) decimals = abs(exponent) whole_digits = digits - decimals if self.max_digits is not None and digits > self.max_digits: raise ValidationError( self.messages['max_digits'], code='max_digits', params={'max': self.max_digits}, ) if self.decimal_places is not None and decimals > self.decimal_places: raise ValidationError( self.messages['max_decimal_places'], code='max_decimal_places', params={'max': self.decimal_places}, ) if (self.max_digits is not None and self.decimal_places is not None and whole_digits > (self.max_digits - self.decimal_places)): raise ValidationError( self.messages['max_whole_digits'], code='max_whole_digits', params={'max': (self.max_digits - self.decimal_places)}, ) def __eq__(self, other): return ( isinstance(other, self.__class__) and self.max_digits == other.max_digits and self.decimal_places == other.decimal_places ) @deconstructible class FileExtensionValidator: message = _( 'File extension “%(extension)s” is not allowed. ' 'Allowed extensions are: %(allowed_extensions)s.' ) code = 'invalid_extension' def __init__(self, allowed_extensions=None, message=None, code=None): if allowed_extensions is not None: allowed_extensions = [allowed_extension.lower() for allowed_extension in allowed_extensions] self.allowed_extensions = allowed_extensions if message is not None: self.message = message if code is not None: self.code = code def __call__(self, value): extension = Path(value.name).suffix[1:].lower() if self.allowed_extensions is not None and extension not in self.allowed_extensions: raise ValidationError( self.message, code=self.code, params={ 'extension': extension, 'allowed_extensions': ', '.join(self.allowed_extensions) } ) def __eq__(self, other): return ( isinstance(other, self.__class__) and self.allowed_extensions == other.allowed_extensions and self.message == other.message and self.code == other.code ) def get_available_image_extensions(): try: from PIL import Image except ImportError: return [] else: Image.init() return [ext.lower()[1:] for ext in Image.EXTENSION] def validate_image_file_extension(value): return FileExtensionValidator(allowed_extensions=get_available_image_extensions())(value) @deconstructible class ProhibitNullCharactersValidator: """Validate that the string doesn't contain the null character.""" message = _('Null characters are not allowed.') code = 'null_characters_not_allowed' def __init__(self, message=None, code=None): if message is not None: self.message = message if code is not None: self.code = code def __call__(self, value): if '\x00' in str(value): raise ValidationError(self.message, code=self.code) def __eq__(self, other): return ( isinstance(other, self.__class__) and self.message == other.message and self.code == other.code )
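# Usage sketch (illustrative, not part of this module): validators are plain
# callables that raise ValidationError on invalid input and return None
# otherwise.
#
#     from django.core.exceptions import ValidationError
#     from django.core.validators import int_list_validator
#
#     validate = int_list_validator(sep=',', allow_negative=True)
#     validate('1,-2,3')  # passes silently
#     try:
#         validate('1;2')
#     except ValidationError:
#         pass  # raised: ';' is not the configured separator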
3362ec6cc299d2962843451b7704b9aa58b7a13e45706cc14ea137b6ad71db98
import asyncore import base64 import mimetypes import os import shutil import smtpd import sys import tempfile import threading from email import charset, message_from_binary_file, message_from_bytes from email.header import Header from email.mime.text import MIMEText from email.utils import parseaddr from io import StringIO from smtplib import SMTP, SMTPAuthenticationError, SMTPException from ssl import SSLError from django.core import mail from django.core.mail import ( EmailMessage, EmailMultiAlternatives, mail_admins, mail_managers, send_mail, send_mass_mail, ) from django.core.mail.backends import console, dummy, filebased, locmem, smtp from django.core.mail.message import BadHeaderError, sanitize_address from django.test import SimpleTestCase, override_settings from django.test.utils import requires_tz_support from django.utils.translation import gettext_lazy class HeadersCheckMixin: def assertMessageHasHeaders(self, message, headers): """ Asserts that the `message` has all `headers`. message: can be an instance of an email.Message subclass or a string with the contents of an email message. headers: should be a set of (header-name, header-value) tuples. """ if isinstance(message, bytes): message = message_from_bytes(message) msg_headers = set(message.items()) self.assertTrue(headers.issubset(msg_headers), msg='Message is missing ' 'the following headers: %s' % (headers - msg_headers),) class MailTests(HeadersCheckMixin, SimpleTestCase): """ Non-backend specific tests. """ def get_decoded_attachments(self, django_message): """ Encode the specified django.core.mail.message.EmailMessage, then decode it using Python's email.parser module and, for each attachment of the message, return a list of tuples with (filename, content, mimetype). """ msg_bytes = django_message.message().as_bytes() email_message = message_from_bytes(msg_bytes) def iter_attachments(): for i in email_message.walk(): if i.get_content_disposition() == 'attachment': filename = i.get_filename() content = i.get_payload(decode=True) mimetype = i.get_content_type() yield filename, content, mimetype return list(iter_attachments()) def test_ascii(self): email = EmailMessage('Subject', 'Content', '[email protected]', ['[email protected]']) message = email.message() self.assertEqual(message['Subject'], 'Subject') self.assertEqual(message.get_payload(), 'Content') self.assertEqual(message['From'], '[email protected]') self.assertEqual(message['To'], '[email protected]') def test_multiple_recipients(self): email = EmailMessage('Subject', 'Content', '[email protected]', ['[email protected]', '[email protected]']) message = email.message() self.assertEqual(message['Subject'], 'Subject') self.assertEqual(message.get_payload(), 'Content') self.assertEqual(message['From'], '[email protected]') self.assertEqual(message['To'], '[email protected], [email protected]') def test_header_omitted_for_no_to_recipients(self): message = EmailMessage('Subject', 'Content', '[email protected]', cc=['[email protected]']).message() self.assertNotIn('To', message) def test_recipients_with_empty_strings(self): """ Empty strings in various recipient arguments are always stripped off the final recipient list. 
""" email = EmailMessage( 'Subject', 'Content', '[email protected]', ['[email protected]', ''], cc=['[email protected]', ''], bcc=['', '[email protected]'], reply_to=['', None], ) self.assertEqual( email.recipients(), ['[email protected]', '[email protected]', '[email protected]'] ) def test_cc(self): """Regression test for #7722""" email = EmailMessage('Subject', 'Content', '[email protected]', ['[email protected]'], cc=['[email protected]']) message = email.message() self.assertEqual(message['Cc'], '[email protected]') self.assertEqual(email.recipients(), ['[email protected]', '[email protected]']) # Test multiple CC with multiple To email = EmailMessage( 'Subject', 'Content', '[email protected]', ['[email protected]', '[email protected]'], cc=['[email protected]', '[email protected]'] ) message = email.message() self.assertEqual(message['Cc'], '[email protected], [email protected]') self.assertEqual( email.recipients(), ['[email protected]', '[email protected]', '[email protected]', '[email protected]'] ) # Testing with Bcc email = EmailMessage( 'Subject', 'Content', '[email protected]', ['[email protected]', '[email protected]'], cc=['[email protected]', '[email protected]'], bcc=['[email protected]'] ) message = email.message() self.assertEqual(message['Cc'], '[email protected], [email protected]') self.assertEqual( email.recipients(), ['[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]'] ) def test_cc_headers(self): message = EmailMessage( 'Subject', 'Content', '[email protected]', ['[email protected]'], cc=['[email protected]'], headers={'Cc': '[email protected]'}, ).message() self.assertEqual(message['Cc'], '[email protected]') def test_cc_in_headers_only(self): message = EmailMessage( 'Subject', 'Content', '[email protected]', ['[email protected]'], headers={'Cc': '[email protected]'}, ).message() self.assertEqual(message['Cc'], '[email protected]') def test_reply_to(self): email = EmailMessage( 'Subject', 'Content', '[email protected]', ['[email protected]'], reply_to=['[email protected]'], ) message = email.message() self.assertEqual(message['Reply-To'], '[email protected]') email = EmailMessage( 'Subject', 'Content', '[email protected]', ['[email protected]'], reply_to=['[email protected]', '[email protected]'] ) message = email.message() self.assertEqual(message['Reply-To'], '[email protected], [email protected]') def test_recipients_as_tuple(self): email = EmailMessage( 'Subject', 'Content', '[email protected]', ('[email protected]', '[email protected]'), cc=('[email protected]', '[email protected]'), bcc=('[email protected]',) ) message = email.message() self.assertEqual(message['Cc'], '[email protected], [email protected]') self.assertEqual( email.recipients(), ['[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]'] ) def test_recipients_as_string(self): with self.assertRaisesMessage(TypeError, '"to" argument must be a list or tuple'): EmailMessage(to='[email protected]') with self.assertRaisesMessage(TypeError, '"cc" argument must be a list or tuple'): EmailMessage(cc='[email protected]') with self.assertRaisesMessage(TypeError, '"bcc" argument must be a list or tuple'): EmailMessage(bcc='[email protected]') with self.assertRaisesMessage(TypeError, '"reply_to" argument must be a list or tuple'): EmailMessage(reply_to='[email protected]') def test_header_injection(self): email = EmailMessage('Subject\nInjection Test', 'Content', '[email protected]', ['[email protected]']) 
with self.assertRaises(BadHeaderError): email.message() email = EmailMessage( gettext_lazy('Subject\nInjection Test'), 'Content', '[email protected]', ['[email protected]'] ) with self.assertRaises(BadHeaderError): email.message() def test_space_continuation(self): """ Test for space continuation character in long (ASCII) subject headers (#7747) """ email = EmailMessage( 'Long subject lines that get wrapped should contain a space ' 'continuation character to get expected behavior in Outlook and Thunderbird', 'Content', '[email protected]', ['[email protected]'] ) message = email.message() self.assertEqual( message['Subject'].encode(), b'Long subject lines that get wrapped should contain a space continuation\n' b' character to get expected behavior in Outlook and Thunderbird' ) def test_message_header_overrides(self): """ Specifying dates or message-ids in the extra headers overrides the default values (#9233) """ headers = {"date": "Fri, 09 Nov 2001 01:08:47 -0000", "Message-ID": "foo"} email = EmailMessage('subject', 'content', '[email protected]', ['[email protected]'], headers=headers) self.assertMessageHasHeaders(email.message(), { ('Content-Transfer-Encoding', '7bit'), ('Content-Type', 'text/plain; charset="utf-8"'), ('From', '[email protected]'), ('MIME-Version', '1.0'), ('Message-ID', 'foo'), ('Subject', 'subject'), ('To', '[email protected]'), ('date', 'Fri, 09 Nov 2001 01:08:47 -0000'), }) def test_from_header(self): """ Make sure we can manually set the From header (#9214) """ email = EmailMessage( 'Subject', 'Content', '[email protected]', ['[email protected]'], headers={'From': '[email protected]'}, ) message = email.message() self.assertEqual(message['From'], '[email protected]') def test_to_header(self): """ Make sure we can manually set the To header (#17444) """ email = EmailMessage('Subject', 'Content', '[email protected]', ['[email protected]', '[email protected]'], headers={'To': '[email protected]'}) message = email.message() self.assertEqual(message['To'], '[email protected]') self.assertEqual(email.to, ['[email protected]', '[email protected]']) # If we don't set the To header manually, it should default to the `to` argument to the constructor email = EmailMessage('Subject', 'Content', '[email protected]', ['[email protected]', '[email protected]']) message = email.message() self.assertEqual(message['To'], '[email protected], [email protected]') self.assertEqual(email.to, ['[email protected]', '[email protected]']) def test_to_in_headers_only(self): message = EmailMessage( 'Subject', 'Content', '[email protected]', headers={'To': '[email protected]'}, ).message() self.assertEqual(message['To'], '[email protected]') def test_reply_to_header(self): """ Specifying 'Reply-To' in headers should override reply_to. 
""" email = EmailMessage( 'Subject', 'Content', '[email protected]', ['[email protected]'], reply_to=['[email protected]'], headers={'Reply-To': '[email protected]'}, ) message = email.message() self.assertEqual(message['Reply-To'], '[email protected]') def test_reply_to_in_headers_only(self): message = EmailMessage( 'Subject', 'Content', '[email protected]', ['[email protected]'], headers={'Reply-To': '[email protected]'}, ).message() self.assertEqual(message['Reply-To'], '[email protected]') def test_multiple_message_call(self): """ Regression for #13259 - Make sure that headers are not changed when calling EmailMessage.message() """ email = EmailMessage( 'Subject', 'Content', '[email protected]', ['[email protected]'], headers={'From': '[email protected]'}, ) message = email.message() self.assertEqual(message['From'], '[email protected]') message = email.message() self.assertEqual(message['From'], '[email protected]') def test_unicode_address_header(self): """ Regression for #11144 - When a to/from/cc header contains unicode, make sure the email addresses are parsed correctly (especially with regards to commas) """ email = EmailMessage( 'Subject', 'Content', '[email protected]', ['"Firstname Sürname" <[email protected]>', '[email protected]'], ) self.assertEqual( email.message()['To'], '=?utf-8?q?Firstname_S=C3=BCrname?= <[email protected]>, [email protected]' ) email = EmailMessage( 'Subject', 'Content', '[email protected]', ['"Sürname, Firstname" <[email protected]>', '[email protected]'], ) self.assertEqual( email.message()['To'], '=?utf-8?q?S=C3=BCrname=2C_Firstname?= <[email protected]>, [email protected]' ) def test_unicode_headers(self): email = EmailMessage( 'Gżegżółka', 'Content', '[email protected]', ['[email protected]'], headers={ 'Sender': '"Firstname Sürname" <[email protected]>', 'Comments': 'My Sürname is non-ASCII', }, ) message = email.message() self.assertEqual(message['Subject'], '=?utf-8?b?R8W8ZWfFvMOzxYJrYQ==?=') self.assertEqual(message['Sender'], '=?utf-8?q?Firstname_S=C3=BCrname?= <[email protected]>') self.assertEqual(message['Comments'], '=?utf-8?q?My_S=C3=BCrname_is_non-ASCII?=') def test_safe_mime_multipart(self): """ Make sure headers can be set with a different encoding than utf-8 in SafeMIMEMultipart as well """ headers = {"Date": "Fri, 09 Nov 2001 01:08:47 -0000", "Message-ID": "foo"} from_email, to = '[email protected]', '"Sürname, Firstname" <[email protected]>' text_content = 'This is an important message.' html_content = '<p>This is an <strong>important</strong> message.</p>' msg = EmailMultiAlternatives('Message from Firstname Sürname', text_content, from_email, [to], headers=headers) msg.attach_alternative(html_content, "text/html") msg.encoding = 'iso-8859-1' self.assertEqual(msg.message()['To'], '=?iso-8859-1?q?S=FCrname=2C_Firstname?= <[email protected]>') self.assertEqual(msg.message()['Subject'], '=?iso-8859-1?q?Message_from_Firstname_S=FCrname?=') def test_safe_mime_multipart_with_attachments(self): """ EmailMultiAlternatives includes alternatives if the body is empty and it has attachments. 
""" msg = EmailMultiAlternatives(body='') html_content = '<p>This is <strong>html</strong></p>' msg.attach_alternative(html_content, 'text/html') msg.attach('example.txt', 'Text file content', 'text/plain') self.assertIn(html_content, msg.message().as_string()) def test_none_body(self): msg = EmailMessage('subject', None, '[email protected]', ['[email protected]']) self.assertEqual(msg.body, '') self.assertEqual(msg.message().get_payload(), '') def test_encoding(self): """ Regression for #12791 - Encode body correctly with other encodings than utf-8 """ email = EmailMessage('Subject', 'Firstname Sürname is a great guy.', '[email protected]', ['[email protected]']) email.encoding = 'iso-8859-1' message = email.message() self.assertMessageHasHeaders(message, { ('MIME-Version', '1.0'), ('Content-Type', 'text/plain; charset="iso-8859-1"'), ('Content-Transfer-Encoding', 'quoted-printable'), ('Subject', 'Subject'), ('From', '[email protected]'), ('To', '[email protected]')}) self.assertEqual(message.get_payload(), 'Firstname S=FCrname is a great guy.') # Make sure MIME attachments also works correctly with other encodings than utf-8 text_content = 'Firstname Sürname is a great guy.' html_content = '<p>Firstname Sürname is a <strong>great</strong> guy.</p>' msg = EmailMultiAlternatives('Subject', text_content, '[email protected]', ['[email protected]']) msg.encoding = 'iso-8859-1' msg.attach_alternative(html_content, "text/html") payload0 = msg.message().get_payload(0) self.assertMessageHasHeaders(payload0, { ('MIME-Version', '1.0'), ('Content-Type', 'text/plain; charset="iso-8859-1"'), ('Content-Transfer-Encoding', 'quoted-printable')}) self.assertTrue(payload0.as_bytes().endswith(b'\n\nFirstname S=FCrname is a great guy.')) payload1 = msg.message().get_payload(1) self.assertMessageHasHeaders(payload1, { ('MIME-Version', '1.0'), ('Content-Type', 'text/html; charset="iso-8859-1"'), ('Content-Transfer-Encoding', 'quoted-printable')}) self.assertTrue( payload1.as_bytes().endswith(b'\n\n<p>Firstname S=FCrname is a <strong>great</strong> guy.</p>') ) def test_attachments(self): """Regression test for #9367""" headers = {"Date": "Fri, 09 Nov 2001 01:08:47 -0000", "Message-ID": "foo"} subject, from_email, to = 'hello', '[email protected]', '[email protected]' text_content = 'This is an important message.' 
html_content = '<p>This is an <strong>important</strong> message.</p>' msg = EmailMultiAlternatives(subject, text_content, from_email, [to], headers=headers) msg.attach_alternative(html_content, "text/html") msg.attach("an attachment.pdf", b"%PDF-1.4.%...", mimetype="application/pdf") msg_bytes = msg.message().as_bytes() message = message_from_bytes(msg_bytes) self.assertTrue(message.is_multipart()) self.assertEqual(message.get_content_type(), 'multipart/mixed') self.assertEqual(message.get_default_type(), 'text/plain') payload = message.get_payload() self.assertEqual(payload[0].get_content_type(), 'multipart/alternative') self.assertEqual(payload[1].get_content_type(), 'application/pdf') def test_attachments_two_tuple(self): msg = EmailMessage(attachments=[('filename1', 'content1')]) filename, content, mimetype = self.get_decoded_attachments(msg)[0] self.assertEqual(filename, 'filename1') self.assertEqual(content, b'content1') self.assertEqual(mimetype, 'application/octet-stream') def test_attachments_MIMEText(self): txt = MIMEText('content1') msg = EmailMessage(attachments=[txt]) payload = msg.message().get_payload() self.assertEqual(payload[0], txt) def test_non_ascii_attachment_filename(self): """Regression test for #14964""" headers = {"Date": "Fri, 09 Nov 2001 01:08:47 -0000", "Message-ID": "foo"} subject, from_email, to = 'hello', '[email protected]', '[email protected]' content = 'This is the message.' msg = EmailMessage(subject, content, from_email, [to], headers=headers) # Unicode in file name msg.attach("une pièce jointe.pdf", b"%PDF-1.4.%...", mimetype="application/pdf") msg_bytes = msg.message().as_bytes() message = message_from_bytes(msg_bytes) payload = message.get_payload() self.assertEqual(payload[1].get_filename(), 'une pièce jointe.pdf') def test_attach_file(self): """ Test attaching a file against different mimetypes and make sure that a file will be attached and sent properly even if an invalid mimetype is specified. """ files = ( # filename, actual mimetype ('file.txt', 'text/plain'), ('file.png', 'image/png'), ('file_txt', None), ('file_png', None), ('file_txt.png', 'image/png'), ('file_png.txt', 'text/plain'), ('file.eml', 'message/rfc822'), ) test_mimetypes = ['text/plain', 'image/png', None] for basename, real_mimetype in files: for mimetype in test_mimetypes: email = EmailMessage('subject', 'body', '[email protected]', ['[email protected]']) self.assertEqual(mimetypes.guess_type(basename)[0], real_mimetype) self.assertEqual(email.attachments, []) file_path = os.path.join(os.path.dirname(__file__), 'attachments', basename) email.attach_file(file_path, mimetype=mimetype) self.assertEqual(len(email.attachments), 1) self.assertIn(basename, email.attachments[0]) msgs_sent_num = email.send() self.assertEqual(msgs_sent_num, 1) def test_attach_text_as_bytes(self): msg = EmailMessage('subject', 'body', '[email protected]', ['[email protected]']) msg.attach('file.txt', b'file content') sent_num = msg.send() self.assertEqual(sent_num, 1) filename, content, mimetype = self.get_decoded_attachments(msg)[0] self.assertEqual(filename, 'file.txt') self.assertEqual(content, b'file content') self.assertEqual(mimetype, 'text/plain') def test_attach_utf8_text_as_bytes(self): """ Non-ASCII characters encoded as valid UTF-8 are correctly transported and decoded. """ msg = EmailMessage('subject', 'body', '[email protected]', ['[email protected]']) msg.attach('file.txt', b'\xc3\xa4') # UTF-8 encoded a umlaut. 
        filename, content, mimetype = self.get_decoded_attachments(msg)[0]
        self.assertEqual(filename, 'file.txt')
        self.assertEqual(content, b'\xc3\xa4')
        self.assertEqual(mimetype, 'text/plain')

    def test_attach_non_utf8_text_as_bytes(self):
        """
        Binary data that can't be decoded as UTF-8 overrides the MIME type
        instead of decoding the data.
        """
        msg = EmailMessage('subject', 'body', '[email protected]', ['[email protected]'])
        msg.attach('file.txt', b'\xff')  # Invalid UTF-8.
        filename, content, mimetype = self.get_decoded_attachments(msg)[0]
        self.assertEqual(filename, 'file.txt')
        # Content should be passed through unmodified.
        self.assertEqual(content, b'\xff')
        self.assertEqual(mimetype, 'application/octet-stream')

    def test_dummy_backend(self):
        """
        Make sure that the dummy backend returns the correct number of sent
        messages.
        """
        connection = dummy.EmailBackend()
        email = EmailMessage(
            'Subject', 'Content', '[email protected]', ['[email protected]'],
            headers={'From': '[email protected]'},
        )
        self.assertEqual(connection.send_messages([email, email, email]), 3)

    def test_arbitrary_keyword(self):
        """
        Make sure that get_connection() accepts arbitrary keyword arguments
        that might be used with custom backends.
        """
        c = mail.get_connection(fail_silently=True, foo='bar')
        self.assertTrue(c.fail_silently)

    def test_custom_backend(self):
        """Test custom backend defined in this suite."""
        conn = mail.get_connection('mail.custombackend.EmailBackend')
        self.assertTrue(hasattr(conn, 'test_outbox'))
        email = EmailMessage(
            'Subject', 'Content', '[email protected]', ['[email protected]'],
            headers={'From': '[email protected]'},
        )
        conn.send_messages([email])
        self.assertEqual(len(conn.test_outbox), 1)

    def test_backend_arg(self):
        """Test backend argument of mail.get_connection()"""
        self.assertIsInstance(mail.get_connection('django.core.mail.backends.smtp.EmailBackend'), smtp.EmailBackend)
        self.assertIsInstance(
            mail.get_connection('django.core.mail.backends.locmem.EmailBackend'), locmem.EmailBackend
        )
        self.assertIsInstance(mail.get_connection('django.core.mail.backends.dummy.EmailBackend'), dummy.EmailBackend)
        self.assertIsInstance(
            mail.get_connection('django.core.mail.backends.console.EmailBackend'), console.EmailBackend
        )
        with tempfile.TemporaryDirectory() as tmp_dir:
            self.assertIsInstance(
                mail.get_connection('django.core.mail.backends.filebased.EmailBackend', file_path=tmp_dir),
                filebased.EmailBackend
            )
        self.assertIsInstance(mail.get_connection(), locmem.EmailBackend)

    @override_settings(
        EMAIL_BACKEND='django.core.mail.backends.locmem.EmailBackend',
        ADMINS=[('nobody', '[email protected]')],
        MANAGERS=[('nobody', '[email protected]')])
    def test_connection_arg(self):
        """Test connection argument to send_mail(), et.
al.""" mail.outbox = [] # Send using non-default connection connection = mail.get_connection('mail.custombackend.EmailBackend') send_mail('Subject', 'Content', '[email protected]', ['[email protected]'], connection=connection) self.assertEqual(mail.outbox, []) self.assertEqual(len(connection.test_outbox), 1) self.assertEqual(connection.test_outbox[0].subject, 'Subject') connection = mail.get_connection('mail.custombackend.EmailBackend') send_mass_mail([ ('Subject1', 'Content1', '[email protected]', ['[email protected]']), ('Subject2', 'Content2', '[email protected]', ['[email protected]']), ], connection=connection) self.assertEqual(mail.outbox, []) self.assertEqual(len(connection.test_outbox), 2) self.assertEqual(connection.test_outbox[0].subject, 'Subject1') self.assertEqual(connection.test_outbox[1].subject, 'Subject2') connection = mail.get_connection('mail.custombackend.EmailBackend') mail_admins('Admin message', 'Content', connection=connection) self.assertEqual(mail.outbox, []) self.assertEqual(len(connection.test_outbox), 1) self.assertEqual(connection.test_outbox[0].subject, '[Django] Admin message') connection = mail.get_connection('mail.custombackend.EmailBackend') mail_managers('Manager message', 'Content', connection=connection) self.assertEqual(mail.outbox, []) self.assertEqual(len(connection.test_outbox), 1) self.assertEqual(connection.test_outbox[0].subject, '[Django] Manager message') def test_dont_mangle_from_in_body(self): # Regression for #13433 - Make sure that EmailMessage doesn't mangle # 'From ' in message body. email = EmailMessage( 'Subject', 'From the future', '[email protected]', ['[email protected]'], headers={'From': '[email protected]'}, ) self.assertNotIn(b'>From the future', email.message().as_bytes()) def test_dont_base64_encode(self): # Ticket #3472 # Shouldn't use Base64 encoding at all msg = EmailMessage( 'Subject', 'UTF-8 encoded body', '[email protected]', ['[email protected]'], headers={'From': '[email protected]'}, ) self.assertIn(b'Content-Transfer-Encoding: 7bit', msg.message().as_bytes()) # Ticket #11212 # Shouldn't use quoted printable, should detect it can represent content with 7 bit data msg = EmailMessage( 'Subject', 'Body with only ASCII characters.', '[email protected]', ['[email protected]'], headers={'From': '[email protected]'}, ) s = msg.message().as_bytes() self.assertIn(b'Content-Transfer-Encoding: 7bit', s) # Shouldn't use quoted printable, should detect it can represent content with 8 bit data msg = EmailMessage( 'Subject', 'Body with latin characters: àáä.', '[email protected]', ['[email protected]'], headers={'From': '[email protected]'}, ) s = msg.message().as_bytes() self.assertIn(b'Content-Transfer-Encoding: 8bit', s) s = msg.message().as_string() self.assertIn('Content-Transfer-Encoding: 8bit', s) msg = EmailMessage( 'Subject', 'Body with non latin characters: А Б В Г Д Е Ж Ѕ З И І К Л М Н О П.', '[email protected]', ['[email protected]'], headers={'From': '[email protected]'}, ) s = msg.message().as_bytes() self.assertIn(b'Content-Transfer-Encoding: 8bit', s) s = msg.message().as_string() self.assertIn('Content-Transfer-Encoding: 8bit', s) def test_dont_base64_encode_message_rfc822(self): # Ticket #18967 # Shouldn't use base64 encoding for a child EmailMessage attachment. 
# Create a child message first child_msg = EmailMessage( 'Child Subject', 'Some body of child message', '[email protected]', ['[email protected]'], headers={'From': '[email protected]'}, ) child_s = child_msg.message().as_string() # Now create a parent parent_msg = EmailMessage( 'Parent Subject', 'Some parent body', '[email protected]', ['[email protected]'], headers={'From': '[email protected]'}, ) # Attach to parent as a string parent_msg.attach(content=child_s, mimetype='message/rfc822') parent_s = parent_msg.message().as_string() # The child message header is not base64 encoded self.assertIn('Child Subject', parent_s) # Feature test: try attaching email.Message object directly to the mail. parent_msg = EmailMessage( 'Parent Subject', 'Some parent body', '[email protected]', ['[email protected]'], headers={'From': '[email protected]'}, ) parent_msg.attach(content=child_msg.message(), mimetype='message/rfc822') parent_s = parent_msg.message().as_string() # The child message header is not base64 encoded self.assertIn('Child Subject', parent_s) # Feature test: try attaching Django's EmailMessage object directly to the mail. parent_msg = EmailMessage( 'Parent Subject', 'Some parent body', '[email protected]', ['[email protected]'], headers={'From': '[email protected]'}, ) parent_msg.attach(content=child_msg, mimetype='message/rfc822') parent_s = parent_msg.message().as_string() # The child message header is not base64 encoded self.assertIn('Child Subject', parent_s) def test_custom_utf8_encoding(self): """A UTF-8 charset with a custom body encoding is respected.""" body = 'Body with latin characters: àáä.' msg = EmailMessage('Subject', body, '[email protected]', ['[email protected]']) encoding = charset.Charset('utf-8') encoding.body_encoding = charset.QP msg.encoding = encoding message = msg.message() self.assertMessageHasHeaders(message, { ('MIME-Version', '1.0'), ('Content-Type', 'text/plain; charset="utf-8"'), ('Content-Transfer-Encoding', 'quoted-printable'), }) self.assertEqual(message.get_payload(), encoding.body_encode(body)) def test_sanitize_address(self): """Email addresses are properly sanitized.""" for email_address, encoding, expected_result in ( # ASCII addresses. ('[email protected]', 'ascii', '[email protected]'), ('[email protected]', 'utf-8', '[email protected]'), (('A name', '[email protected]'), 'ascii', 'A name <[email protected]>'), ( ('A name', '[email protected]'), 'utf-8', '=?utf-8?q?A_name?= <[email protected]>', ), ('localpartonly', 'ascii', 'localpartonly'), # ASCII addresses with display names. ('A name <[email protected]>', 'ascii', 'A name <[email protected]>'), ('A name <[email protected]>', 'utf-8', '=?utf-8?q?A_name?= <[email protected]>'), ('"A name" <[email protected]>', 'ascii', 'A name <[email protected]>'), ('"A name" <[email protected]>', 'utf-8', '=?utf-8?q?A_name?= <[email protected]>'), # Unicode addresses (supported per RFC-6532). ('tó@example.com', 'utf-8', '[email protected]'), ('to@éxample.com', 'utf-8', '[email protected]'), ( ('Tó Example', 'tó@example.com'), 'utf-8', '=?utf-8?q?T=C3=B3_Example?= <[email protected]>', ), # Unicode addresses with display names. ( 'Tó Example <tó@example.com>', 'utf-8', '=?utf-8?q?T=C3=B3_Example?= <[email protected]>', ), ('To Example <to@éxample.com>', 'ascii', 'To Example <[email protected]>'), ( 'To Example <to@éxample.com>', 'utf-8', '=?utf-8?q?To_Example?= <[email protected]>', ), # Addresses with two @ signs. 
            ('"[email protected]"@example.com', 'utf-8', r'"[email protected]"@example.com'),
            (
                '"[email protected]" <[email protected]>',
                'utf-8',
                '=?utf-8?q?to=40other=2Ecom?= <[email protected]>',
            ),
            (
                ('To Example', '[email protected]@example.com'),
                'utf-8',
                '=?utf-8?q?To_Example?= <"[email protected]"@example.com>',
            ),
        ):
            with self.subTest(email_address=email_address, encoding=encoding):
                self.assertEqual(sanitize_address(email_address, encoding), expected_result)

    def test_sanitize_address_invalid(self):
        for email_address in (
            # Invalid address with two @ signs.
            '[email protected]@example.com',
            # Invalid address without the quotes.
            '[email protected] <[email protected]>',
            # Other invalid addresses.
            '@',
            'to@',
            '@example.com',
        ):
            with self.subTest(email_address=email_address):
                with self.assertRaises(ValueError):
                    sanitize_address(email_address, encoding='utf-8')


@requires_tz_support
class MailTimeZoneTests(SimpleTestCase):

    @override_settings(EMAIL_USE_LOCALTIME=False, USE_TZ=True, TIME_ZONE='Africa/Algiers')
    def test_date_header_utc(self):
        """
        EMAIL_USE_LOCALTIME=False creates a datetime in UTC.
        """
        email = EmailMessage('Subject', 'Body', '[email protected]', ['[email protected]'])
        self.assertTrue(email.message()['Date'].endswith('-0000'))

    @override_settings(EMAIL_USE_LOCALTIME=True, USE_TZ=True, TIME_ZONE='Africa/Algiers')
    def test_date_header_localtime(self):
        """
        EMAIL_USE_LOCALTIME=True creates a datetime in the local time zone.
        """
        email = EmailMessage('Subject', 'Body', '[email protected]', ['[email protected]'])
        self.assertTrue(email.message()['Date'].endswith('+0100'))  # Africa/Algiers is UTC+1


class PythonGlobalState(SimpleTestCase):
    """
    Tests for #12422 -- Django's special-casing (#2472/#11212) of the utf-8
    charset for text parts shouldn't pollute the global charset registry of
    Python's email package when django.core.mail.message is imported.
""" def test_utf8(self): txt = MIMEText('UTF-8 encoded body', 'plain', 'utf-8') self.assertIn('Content-Transfer-Encoding: base64', txt.as_string()) def test_7bit(self): txt = MIMEText('Body with only ASCII characters.', 'plain', 'utf-8') self.assertIn('Content-Transfer-Encoding: base64', txt.as_string()) def test_8bit_latin(self): txt = MIMEText('Body with latin characters: àáä.', 'plain', 'utf-8') self.assertIn('Content-Transfer-Encoding: base64', txt.as_string()) def test_8bit_non_latin(self): txt = MIMEText('Body with non latin characters: А Б В Г Д Е Ж Ѕ З И І К Л М Н О П.', 'plain', 'utf-8') self.assertIn('Content-Transfer-Encoding: base64', txt.as_string()) class BaseEmailBackendTests(HeadersCheckMixin): email_backend = None def setUp(self): self.settings_override = override_settings(EMAIL_BACKEND=self.email_backend) self.settings_override.enable() def tearDown(self): self.settings_override.disable() def assertStartsWith(self, first, second): if not first.startswith(second): self.longMessage = True self.assertEqual(first[:len(second)], second, "First string doesn't start with the second.") def get_mailbox_content(self): raise NotImplementedError('subclasses of BaseEmailBackendTests must provide a get_mailbox_content() method') def flush_mailbox(self): raise NotImplementedError('subclasses of BaseEmailBackendTests may require a flush_mailbox() method') def get_the_message(self): mailbox = self.get_mailbox_content() self.assertEqual( len(mailbox), 1, "Expected exactly one message, got %d.\n%r" % (len(mailbox), [m.as_string() for m in mailbox]) ) return mailbox[0] def test_send(self): email = EmailMessage('Subject', 'Content', '[email protected]', ['[email protected]']) num_sent = mail.get_connection().send_messages([email]) self.assertEqual(num_sent, 1) message = self.get_the_message() self.assertEqual(message["subject"], "Subject") self.assertEqual(message.get_payload(), "Content") self.assertEqual(message["from"], "[email protected]") self.assertEqual(message.get_all("to"), ["[email protected]"]) def test_send_unicode(self): email = EmailMessage('Chère maman', 'Je t\'aime très fort', '[email protected]', ['[email protected]']) num_sent = mail.get_connection().send_messages([email]) self.assertEqual(num_sent, 1) message = self.get_the_message() self.assertEqual(message["subject"], '=?utf-8?q?Ch=C3=A8re_maman?=') self.assertEqual(message.get_payload(decode=True).decode(), 'Je t\'aime très fort') def test_send_long_lines(self): """ Email line length is limited to 998 chars by the RFC: https://tools.ietf.org/html/rfc5322#section-2.1.1 Message body containing longer lines are converted to Quoted-Printable to avoid having to insert newlines, which could be hairy to do properly. """ # Unencoded body length is < 998 (840) but > 998 when utf-8 encoded. email = EmailMessage('Subject', 'В южных морях ' * 60, '[email protected]', ['[email protected]']) email.send() message = self.get_the_message() self.assertMessageHasHeaders(message, { ('MIME-Version', '1.0'), ('Content-Type', 'text/plain; charset="utf-8"'), ('Content-Transfer-Encoding', 'quoted-printable'), }) def test_send_many(self): email1 = EmailMessage('Subject', 'Content1', '[email protected]', ['[email protected]']) email2 = EmailMessage('Subject', 'Content2', '[email protected]', ['[email protected]']) # send_messages() may take a list or an iterator. 
emails_lists = ([email1, email2], iter((email1, email2))) for emails_list in emails_lists: num_sent = mail.get_connection().send_messages(emails_list) self.assertEqual(num_sent, 2) messages = self.get_mailbox_content() self.assertEqual(len(messages), 2) self.assertEqual(messages[0].get_payload(), 'Content1') self.assertEqual(messages[1].get_payload(), 'Content2') self.flush_mailbox() def test_send_verbose_name(self): email = EmailMessage("Subject", "Content", '"Firstname Sürname" <[email protected]>', ["[email protected]"]) email.send() message = self.get_the_message() self.assertEqual(message["subject"], "Subject") self.assertEqual(message.get_payload(), "Content") self.assertEqual(message["from"], "=?utf-8?q?Firstname_S=C3=BCrname?= <[email protected]>") def test_plaintext_send_mail(self): """ Test send_mail without the html_message regression test for adding html_message parameter to send_mail() """ send_mail('Subject', 'Content', '[email protected]', ['[email protected]']) message = self.get_the_message() self.assertEqual(message.get('subject'), 'Subject') self.assertEqual(message.get_all('to'), ['[email protected]']) self.assertFalse(message.is_multipart()) self.assertEqual(message.get_payload(), 'Content') self.assertEqual(message.get_content_type(), 'text/plain') def test_html_send_mail(self): """Test html_message argument to send_mail""" send_mail('Subject', 'Content', '[email protected]', ['[email protected]'], html_message='HTML Content') message = self.get_the_message() self.assertEqual(message.get('subject'), 'Subject') self.assertEqual(message.get_all('to'), ['[email protected]']) self.assertTrue(message.is_multipart()) self.assertEqual(len(message.get_payload()), 2) self.assertEqual(message.get_payload(0).get_payload(), 'Content') self.assertEqual(message.get_payload(0).get_content_type(), 'text/plain') self.assertEqual(message.get_payload(1).get_payload(), 'HTML Content') self.assertEqual(message.get_payload(1).get_content_type(), 'text/html') @override_settings(MANAGERS=[('nobody', '[email protected]')]) def test_html_mail_managers(self): """Test html_message argument to mail_managers""" mail_managers('Subject', 'Content', html_message='HTML Content') message = self.get_the_message() self.assertEqual(message.get('subject'), '[Django] Subject') self.assertEqual(message.get_all('to'), ['[email protected]']) self.assertTrue(message.is_multipart()) self.assertEqual(len(message.get_payload()), 2) self.assertEqual(message.get_payload(0).get_payload(), 'Content') self.assertEqual(message.get_payload(0).get_content_type(), 'text/plain') self.assertEqual(message.get_payload(1).get_payload(), 'HTML Content') self.assertEqual(message.get_payload(1).get_content_type(), 'text/html') @override_settings(ADMINS=[('nobody', '[email protected]')]) def test_html_mail_admins(self): """Test html_message argument to mail_admins """ mail_admins('Subject', 'Content', html_message='HTML Content') message = self.get_the_message() self.assertEqual(message.get('subject'), '[Django] Subject') self.assertEqual(message.get_all('to'), ['[email protected]']) self.assertTrue(message.is_multipart()) self.assertEqual(len(message.get_payload()), 2) self.assertEqual(message.get_payload(0).get_payload(), 'Content') self.assertEqual(message.get_payload(0).get_content_type(), 'text/plain') self.assertEqual(message.get_payload(1).get_payload(), 'HTML Content') self.assertEqual(message.get_payload(1).get_content_type(), 'text/html') @override_settings( ADMINS=[('nobody', '[email protected]')], MANAGERS=[('nobody', 
'[email protected]')]) def test_manager_and_admin_mail_prefix(self): """ String prefix + lazy translated subject = bad output Regression for #13494 """ mail_managers(gettext_lazy('Subject'), 'Content') message = self.get_the_message() self.assertEqual(message.get('subject'), '[Django] Subject') self.flush_mailbox() mail_admins(gettext_lazy('Subject'), 'Content') message = self.get_the_message() self.assertEqual(message.get('subject'), '[Django] Subject') @override_settings(ADMINS=[], MANAGERS=[]) def test_empty_admins(self): """ mail_admins/mail_managers doesn't connect to the mail server if there are no recipients (#9383) """ mail_admins('hi', 'there') self.assertEqual(self.get_mailbox_content(), []) mail_managers('hi', 'there') self.assertEqual(self.get_mailbox_content(), []) def test_wrong_admins_managers(self): tests = ( '[email protected]', ('[email protected]',), ['[email protected]', '[email protected]'], ('[email protected]', '[email protected]'), ) for setting, mail_func in ( ('ADMINS', mail_admins), ('MANAGERS', mail_managers), ): msg = 'The %s setting must be a list of 2-tuples.' % setting for value in tests: with self.subTest(setting=setting, value=value), self.settings(**{setting: value}): with self.assertRaisesMessage(ValueError, msg): mail_func('subject', 'content') def test_message_cc_header(self): """ Regression test for #7722 """ email = EmailMessage('Subject', 'Content', '[email protected]', ['[email protected]'], cc=['[email protected]']) mail.get_connection().send_messages([email]) message = self.get_the_message() self.assertMessageHasHeaders(message, { ('MIME-Version', '1.0'), ('Content-Type', 'text/plain; charset="utf-8"'), ('Content-Transfer-Encoding', '7bit'), ('Subject', 'Subject'), ('From', '[email protected]'), ('To', '[email protected]'), ('Cc', '[email protected]')}) self.assertIn('\nDate: ', message.as_string()) def test_idn_send(self): """ Regression test for #14301 """ self.assertTrue(send_mail('Subject', 'Content', 'from@öäü.com', ['to@öäü.com'])) message = self.get_the_message() self.assertEqual(message.get('subject'), 'Subject') self.assertEqual(message.get('from'), '[email protected]') self.assertEqual(message.get('to'), '[email protected]') self.flush_mailbox() m = EmailMessage('Subject', 'Content', 'from@öäü.com', ['to@öäü.com'], cc=['cc@öäü.com']) m.send() message = self.get_the_message() self.assertEqual(message.get('subject'), 'Subject') self.assertEqual(message.get('from'), '[email protected]') self.assertEqual(message.get('to'), '[email protected]') self.assertEqual(message.get('cc'), '[email protected]') def test_recipient_without_domain(self): """ Regression test for #15042 """ self.assertTrue(send_mail("Subject", "Content", "tester", ["django"])) message = self.get_the_message() self.assertEqual(message.get('subject'), 'Subject') self.assertEqual(message.get('from'), "tester") self.assertEqual(message.get('to'), "django") def test_lazy_addresses(self): """ Email sending should support lazy email addresses (#24416). 
""" _ = gettext_lazy self.assertTrue(send_mail('Subject', 'Content', _('tester'), [_('django')])) message = self.get_the_message() self.assertEqual(message.get('from'), 'tester') self.assertEqual(message.get('to'), 'django') self.flush_mailbox() m = EmailMessage( 'Subject', 'Content', _('tester'), [_('to1'), _('to2')], cc=[_('cc1'), _('cc2')], bcc=[_('bcc')], reply_to=[_('reply')], ) self.assertEqual(m.recipients(), ['to1', 'to2', 'cc1', 'cc2', 'bcc']) m.send() message = self.get_the_message() self.assertEqual(message.get('from'), 'tester') self.assertEqual(message.get('to'), 'to1, to2') self.assertEqual(message.get('cc'), 'cc1, cc2') self.assertEqual(message.get('Reply-To'), 'reply') def test_close_connection(self): """ Connection can be closed (even when not explicitly opened) """ conn = mail.get_connection(username='', password='') conn.close() def test_use_as_contextmanager(self): """ The connection can be used as a contextmanager. """ opened = [False] closed = [False] conn = mail.get_connection(username='', password='') def open(): opened[0] = True conn.open = open def close(): closed[0] = True conn.close = close with conn as same_conn: self.assertTrue(opened[0]) self.assertIs(same_conn, conn) self.assertFalse(closed[0]) self.assertTrue(closed[0]) class LocmemBackendTests(BaseEmailBackendTests, SimpleTestCase): email_backend = 'django.core.mail.backends.locmem.EmailBackend' def get_mailbox_content(self): return [m.message() for m in mail.outbox] def flush_mailbox(self): mail.outbox = [] def tearDown(self): super().tearDown() mail.outbox = [] def test_locmem_shared_messages(self): """ Make sure that the locmen backend populates the outbox. """ connection = locmem.EmailBackend() connection2 = locmem.EmailBackend() email = EmailMessage( 'Subject', 'Content', '[email protected]', ['[email protected]'], headers={'From': '[email protected]'}, ) connection.send_messages([email]) connection2.send_messages([email]) self.assertEqual(len(mail.outbox), 2) def test_validate_multiline_headers(self): # Ticket #18861 - Validate emails when using the locmem backend with self.assertRaises(BadHeaderError): send_mail('Subject\nMultiline', 'Content', '[email protected]', ['[email protected]']) class FileBackendTests(BaseEmailBackendTests, SimpleTestCase): email_backend = 'django.core.mail.backends.filebased.EmailBackend' def setUp(self): super().setUp() self.tmp_dir = tempfile.mkdtemp() self.addCleanup(shutil.rmtree, self.tmp_dir) self._settings_override = override_settings(EMAIL_FILE_PATH=self.tmp_dir) self._settings_override.enable() def tearDown(self): self._settings_override.disable() super().tearDown() def flush_mailbox(self): for filename in os.listdir(self.tmp_dir): os.unlink(os.path.join(self.tmp_dir, filename)) def get_mailbox_content(self): messages = [] for filename in os.listdir(self.tmp_dir): with open(os.path.join(self.tmp_dir, filename), 'rb') as fp: session = fp.read().split(b'\n' + (b'-' * 79) + b'\n') messages.extend(message_from_bytes(m) for m in session if m) return messages def test_file_sessions(self): """Make sure opening a connection creates a new file""" msg = EmailMessage( 'Subject', 'Content', '[email protected]', ['[email protected]'], headers={'From': '[email protected]'}, ) connection = mail.get_connection() connection.send_messages([msg]) self.assertEqual(len(os.listdir(self.tmp_dir)), 1) with open(os.path.join(self.tmp_dir, os.listdir(self.tmp_dir)[0]), 'rb') as fp: message = message_from_binary_file(fp) self.assertEqual(message.get_content_type(), 'text/plain') 
self.assertEqual(message.get('subject'), 'Subject') self.assertEqual(message.get('from'), '[email protected]') self.assertEqual(message.get('to'), '[email protected]') connection2 = mail.get_connection() connection2.send_messages([msg]) self.assertEqual(len(os.listdir(self.tmp_dir)), 2) connection.send_messages([msg]) self.assertEqual(len(os.listdir(self.tmp_dir)), 2) msg.connection = mail.get_connection() self.assertTrue(connection.open()) msg.send() self.assertEqual(len(os.listdir(self.tmp_dir)), 3) msg.send() self.assertEqual(len(os.listdir(self.tmp_dir)), 3) connection.close() class ConsoleBackendTests(BaseEmailBackendTests, SimpleTestCase): email_backend = 'django.core.mail.backends.console.EmailBackend' def setUp(self): super().setUp() self.__stdout = sys.stdout self.stream = sys.stdout = StringIO() def tearDown(self): del self.stream sys.stdout = self.__stdout del self.__stdout super().tearDown() def flush_mailbox(self): self.stream = sys.stdout = StringIO() def get_mailbox_content(self): messages = self.stream.getvalue().split('\n' + ('-' * 79) + '\n') return [message_from_bytes(m.encode()) for m in messages if m] def test_console_stream_kwarg(self): """ The console backend can be pointed at an arbitrary stream. """ s = StringIO() connection = mail.get_connection('django.core.mail.backends.console.EmailBackend', stream=s) send_mail('Subject', 'Content', '[email protected]', ['[email protected]'], connection=connection) message = s.getvalue().split('\n' + ('-' * 79) + '\n')[0].encode() self.assertMessageHasHeaders(message, { ('MIME-Version', '1.0'), ('Content-Type', 'text/plain; charset="utf-8"'), ('Content-Transfer-Encoding', '7bit'), ('Subject', 'Subject'), ('From', '[email protected]'), ('To', '[email protected]')}) self.assertIn(b'\nDate: ', message) class FakeSMTPChannel(smtpd.SMTPChannel): def collect_incoming_data(self, data): try: smtpd.SMTPChannel.collect_incoming_data(self, data) except UnicodeDecodeError: # Ignore decode error in SSL/TLS connection tests as the test only # cares whether the connection attempt was made. pass def smtp_AUTH(self, arg): if arg == 'CRAM-MD5': # This is only the first part of the login process. But it's enough # for our tests. challenge = base64.b64encode(b'somerandomstring13579') self.push('334 %s' % challenge.decode()) else: self.push('502 Error: login "%s" not implemented' % arg) class FakeSMTPServer(smtpd.SMTPServer, threading.Thread): """ Asyncore SMTP server wrapped into a thread. Based on DummyFTPServer from: http://svn.python.org/view/python/branches/py3k/Lib/test/test_ftplib.py?revision=86061&view=markup """ channel_class = FakeSMTPChannel def __init__(self, *args, **kwargs): threading.Thread.__init__(self) smtpd.SMTPServer.__init__(self, *args, decode_data=True, **kwargs) self._sink = [] self.active = False self.active_lock = threading.Lock() self.sink_lock = threading.Lock() def process_message(self, peer, mailfrom, rcpttos, data): data = data.encode() m = message_from_bytes(data) maddr = parseaddr(m.get('from'))[1] if mailfrom != maddr: # According to the spec, mailfrom does not necessarily match the # From header - this is the case where the local part isn't # encoded, so try to correct that. 
lp, domain = mailfrom.split('@', 1) lp = Header(lp, 'utf-8').encode() mailfrom = '@'.join([lp, domain]) if mailfrom != maddr: return "553 '%s' != '%s'" % (mailfrom, maddr) with self.sink_lock: self._sink.append(m) def get_sink(self): with self.sink_lock: return self._sink[:] def flush_sink(self): with self.sink_lock: self._sink[:] = [] def start(self): assert not self.active self.__flag = threading.Event() threading.Thread.start(self) self.__flag.wait() def run(self): self.active = True self.__flag.set() while self.active and asyncore.socket_map: with self.active_lock: asyncore.loop(timeout=0.1, count=1) asyncore.close_all() def stop(self): if self.active: self.active = False self.join() class FakeAUTHSMTPConnection(SMTP): """ A SMTP connection pretending support for the AUTH command. It does not, but at least this can allow testing the first part of the AUTH process. """ def ehlo(self, name=''): response = SMTP.ehlo(self, name=name) self.esmtp_features.update({ 'auth': 'CRAM-MD5 PLAIN LOGIN', }) return response class SMTPBackendTestsBase(SimpleTestCase): @classmethod def setUpClass(cls): super().setUpClass() cls.server = FakeSMTPServer(('127.0.0.1', 0), None) cls._settings_override = override_settings( EMAIL_HOST="127.0.0.1", EMAIL_PORT=cls.server.socket.getsockname()[1]) cls._settings_override.enable() cls.server.start() @classmethod def tearDownClass(cls): cls._settings_override.disable() cls.server.stop() super().tearDownClass() class SMTPBackendTests(BaseEmailBackendTests, SMTPBackendTestsBase): email_backend = 'django.core.mail.backends.smtp.EmailBackend' def setUp(self): super().setUp() self.server.flush_sink() def tearDown(self): self.server.flush_sink() super().tearDown() def flush_mailbox(self): self.server.flush_sink() def get_mailbox_content(self): return self.server.get_sink() @override_settings( EMAIL_HOST_USER="not empty username", EMAIL_HOST_PASSWORD='not empty password', ) def test_email_authentication_use_settings(self): backend = smtp.EmailBackend() self.assertEqual(backend.username, 'not empty username') self.assertEqual(backend.password, 'not empty password') @override_settings( EMAIL_HOST_USER="not empty username", EMAIL_HOST_PASSWORD='not empty password', ) def test_email_authentication_override_settings(self): backend = smtp.EmailBackend(username='username', password='password') self.assertEqual(backend.username, 'username') self.assertEqual(backend.password, 'password') @override_settings( EMAIL_HOST_USER="not empty username", EMAIL_HOST_PASSWORD='not empty password', ) def test_email_disabled_authentication(self): backend = smtp.EmailBackend(username='', password='') self.assertEqual(backend.username, '') self.assertEqual(backend.password, '') def test_auth_attempted(self): """ Opening the backend with non empty username/password tries to authenticate against the SMTP server. """ backend = smtp.EmailBackend( username='not empty username', password='not empty password') with self.assertRaisesMessage(SMTPException, 'SMTP AUTH extension not supported by server.'): with backend: pass def test_server_open(self): """ open() returns whether it opened a connection. """ backend = smtp.EmailBackend(username='', password='') self.assertIsNone(backend.connection) opened = backend.open() backend.close() self.assertIs(opened, True) def test_reopen_connection(self): backend = smtp.EmailBackend() # Simulate an already open connection. 
backend.connection = True self.assertIs(backend.open(), False) def test_server_login(self): """ Even if the Python SMTP server doesn't support authentication, the login process starts and the appropriate exception is raised. """ class CustomEmailBackend(smtp.EmailBackend): connection_class = FakeAUTHSMTPConnection backend = CustomEmailBackend(username='username', password='password') with self.assertRaises(SMTPAuthenticationError): with backend: pass @override_settings(EMAIL_USE_TLS=True) def test_email_tls_use_settings(self): backend = smtp.EmailBackend() self.assertTrue(backend.use_tls) @override_settings(EMAIL_USE_TLS=True) def test_email_tls_override_settings(self): backend = smtp.EmailBackend(use_tls=False) self.assertFalse(backend.use_tls) def test_email_tls_default_disabled(self): backend = smtp.EmailBackend() self.assertFalse(backend.use_tls) def test_ssl_tls_mutually_exclusive(self): msg = ( 'EMAIL_USE_TLS/EMAIL_USE_SSL are mutually exclusive, so only set ' 'one of those settings to True.' ) with self.assertRaisesMessage(ValueError, msg): smtp.EmailBackend(use_ssl=True, use_tls=True) @override_settings(EMAIL_USE_SSL=True) def test_email_ssl_use_settings(self): backend = smtp.EmailBackend() self.assertTrue(backend.use_ssl) @override_settings(EMAIL_USE_SSL=True) def test_email_ssl_override_settings(self): backend = smtp.EmailBackend(use_ssl=False) self.assertFalse(backend.use_ssl) def test_email_ssl_default_disabled(self): backend = smtp.EmailBackend() self.assertFalse(backend.use_ssl) @override_settings(EMAIL_SSL_CERTFILE='foo') def test_email_ssl_certfile_use_settings(self): backend = smtp.EmailBackend() self.assertEqual(backend.ssl_certfile, 'foo') @override_settings(EMAIL_SSL_CERTFILE='foo') def test_email_ssl_certfile_override_settings(self): backend = smtp.EmailBackend(ssl_certfile='bar') self.assertEqual(backend.ssl_certfile, 'bar') def test_email_ssl_certfile_default_disabled(self): backend = smtp.EmailBackend() self.assertIsNone(backend.ssl_certfile) @override_settings(EMAIL_SSL_KEYFILE='foo') def test_email_ssl_keyfile_use_settings(self): backend = smtp.EmailBackend() self.assertEqual(backend.ssl_keyfile, 'foo') @override_settings(EMAIL_SSL_KEYFILE='foo') def test_email_ssl_keyfile_override_settings(self): backend = smtp.EmailBackend(ssl_keyfile='bar') self.assertEqual(backend.ssl_keyfile, 'bar') def test_email_ssl_keyfile_default_disabled(self): backend = smtp.EmailBackend() self.assertIsNone(backend.ssl_keyfile) @override_settings(EMAIL_USE_TLS=True) def test_email_tls_attempts_starttls(self): backend = smtp.EmailBackend() self.assertTrue(backend.use_tls) with self.assertRaisesMessage(SMTPException, 'STARTTLS extension not supported by server.'): with backend: pass @override_settings(EMAIL_USE_SSL=True) def test_email_ssl_attempts_ssl_connection(self): backend = smtp.EmailBackend() self.assertTrue(backend.use_ssl) with self.assertRaises(SSLError): with backend: pass def test_connection_timeout_default(self): """The connection's timeout value is None by default.""" connection = mail.get_connection('django.core.mail.backends.smtp.EmailBackend') self.assertIsNone(connection.timeout) def test_connection_timeout_custom(self): """The timeout parameter can be customized.""" class MyEmailBackend(smtp.EmailBackend): def __init__(self, *args, **kwargs): kwargs.setdefault('timeout', 42) super().__init__(*args, **kwargs) myemailbackend = MyEmailBackend() myemailbackend.open() self.assertEqual(myemailbackend.timeout, 42) self.assertEqual(myemailbackend.connection.timeout, 42) 
myemailbackend.close() @override_settings(EMAIL_TIMEOUT=10) def test_email_timeout_override_settings(self): backend = smtp.EmailBackend() self.assertEqual(backend.timeout, 10) def test_email_msg_uses_crlf(self): """#23063 -- RFC-compliant messages are sent over SMTP.""" send = SMTP.send try: smtp_messages = [] def mock_send(self, s): smtp_messages.append(s) return send(self, s) SMTP.send = mock_send email = EmailMessage('Subject', 'Content', '[email protected]', ['[email protected]']) mail.get_connection().send_messages([email]) # Find the actual message msg = None for i, m in enumerate(smtp_messages): if m[:4] == 'data': msg = smtp_messages[i + 1] break self.assertTrue(msg) msg = msg.decode() # The message only contains CRLF and not combinations of CRLF, LF, and CR. msg = msg.replace('\r\n', '') self.assertNotIn('\r', msg) self.assertNotIn('\n', msg) finally: SMTP.send = send def test_send_messages_after_open_failed(self): """ send_messages() shouldn't try to send messages if open() raises an exception after initializing the connection. """ backend = smtp.EmailBackend() # Simulate connection initialization success and a subsequent # connection exception. backend.connection = True backend.open = lambda: None email = EmailMessage('Subject', 'Content', '[email protected]', ['[email protected]']) self.assertEqual(backend.send_messages([email]), 0) def test_send_messages_empty_list(self): backend = smtp.EmailBackend() backend.connection = True self.assertEqual(backend.send_messages([]), 0) def test_send_messages_zero_sent(self): """A message isn't sent if it doesn't have any recipients.""" backend = smtp.EmailBackend() backend.connection = True email = EmailMessage('Subject', 'Content', '[email protected]', to=[]) sent = backend.send_messages([email]) self.assertEqual(sent, 0) class SMTPBackendStoppedServerTests(SMTPBackendTestsBase): """ These tests require a separate class, because the FakeSMTPServer is shut down in setUpClass(), and it cannot be restarted ("RuntimeError: threads can only be started once"). """ @classmethod def setUpClass(cls): super().setUpClass() cls.backend = smtp.EmailBackend(username='', password='') cls.server.stop() def test_server_stopped(self): """ Closing the backend while the SMTP server is stopped doesn't raise an exception. """ self.backend.close() def test_fail_silently_on_connection_error(self): """ A socket connection error is silenced with fail_silently=True. """ with self.assertRaises(ConnectionError): self.backend.open() self.backend.fail_silently = True self.backend.open()
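# The suite above gets 'mail.custombackend.EmailBackend' from a companion
# module that isn't shown in this file. A minimal sketch consistent with how
# the tests use it: each connection instance keeps its own test_outbox, and
# send_messages() returns the number of messages handled, matching the
# contract the dummy-backend test asserts.
from django.core.mail.backends.base import BaseEmailBackend


class EmailBackend(BaseEmailBackend):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Per-connection outbox: test_connection_arg() expects each fresh
        # connection to start empty.
        self.test_outbox = []

    def send_messages(self, email_messages):
        # Store the messages on the connection instead of sending them.
        self.test_outbox += email_messages
        return len(email_messages)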
c54ddc0d20f831e6330ccc0773b71ac74cc5d8d14a990120be71243e2db36705
import copy import inspect import warnings from functools import partialmethod from itertools import chain from django.apps import apps from django.conf import settings from django.core import checks from django.core.exceptions import ( NON_FIELD_ERRORS, FieldDoesNotExist, FieldError, MultipleObjectsReturned, ObjectDoesNotExist, ValidationError, ) from django.db import ( DEFAULT_DB_ALIAS, DJANGO_VERSION_PICKLE_KEY, DatabaseError, connection, connections, router, transaction, ) from django.db.models.constants import LOOKUP_SEP from django.db.models.constraints import CheckConstraint, UniqueConstraint from django.db.models.deletion import CASCADE, Collector from django.db.models.fields.related import ( ForeignObjectRel, OneToOneField, lazy_related_operation, resolve_relation, ) from django.db.models.manager import Manager from django.db.models.options import Options from django.db.models.query import Q from django.db.models.signals import ( class_prepared, post_init, post_save, pre_init, pre_save, ) from django.db.models.utils import make_model_tuple from django.utils.encoding import force_str from django.utils.text import capfirst, get_text_list from django.utils.translation import gettext_lazy as _ from django.utils.version import get_version class Deferred: def __repr__(self): return '<Deferred field>' def __str__(self): return '<Deferred field>' DEFERRED = Deferred() def subclass_exception(name, bases, module, attached_to): """ Create exception subclass. Used by ModelBase below. The exception is created in a way that allows it to be pickled, assuming that the returned exception class will be added as an attribute to the 'attached_to' class. """ return type(name, bases, { '__module__': module, '__qualname__': '%s.%s' % (attached_to.__qualname__, name), }) def _has_contribute_to_class(value): # Only call contribute_to_class() if it's bound. return not inspect.isclass(value) and hasattr(value, 'contribute_to_class') class ModelBase(type): """Metaclass for all models.""" def __new__(cls, name, bases, attrs, **kwargs): super_new = super().__new__ # Also ensure initialization is only performed for subclasses of Model # (excluding Model class itself). parents = [b for b in bases if isinstance(b, ModelBase)] if not parents: return super_new(cls, name, bases, attrs) # Create the class. module = attrs.pop('__module__') new_attrs = {'__module__': module} classcell = attrs.pop('__classcell__', None) if classcell is not None: new_attrs['__classcell__'] = classcell attr_meta = attrs.pop('Meta', None) # Pass all attrs without a (Django-specific) contribute_to_class() # method to type.__new__() so that they're properly initialized # (i.e. __set_name__()). contributable_attrs = {} for obj_name, obj in list(attrs.items()): if _has_contribute_to_class(obj): contributable_attrs[obj_name] = obj else: new_attrs[obj_name] = obj new_class = super_new(cls, name, bases, new_attrs, **kwargs) abstract = getattr(attr_meta, 'abstract', False) meta = attr_meta or getattr(new_class, 'Meta', None) base_meta = getattr(new_class, '_meta', None) app_label = None # Look for an application configuration to attach the model to. app_config = apps.get_containing_app_config(module) if getattr(meta, 'app_label', None) is None: if app_config is None: if not abstract: raise RuntimeError( "Model class %s.%s doesn't declare an explicit " "app_label and isn't in an application in " "INSTALLED_APPS." 
% (module, name) ) else: app_label = app_config.label new_class.add_to_class('_meta', Options(meta, app_label)) if not abstract: new_class.add_to_class( 'DoesNotExist', subclass_exception( 'DoesNotExist', tuple( x.DoesNotExist for x in parents if hasattr(x, '_meta') and not x._meta.abstract ) or (ObjectDoesNotExist,), module, attached_to=new_class)) new_class.add_to_class( 'MultipleObjectsReturned', subclass_exception( 'MultipleObjectsReturned', tuple( x.MultipleObjectsReturned for x in parents if hasattr(x, '_meta') and not x._meta.abstract ) or (MultipleObjectsReturned,), module, attached_to=new_class)) if base_meta and not base_meta.abstract: # Non-abstract child classes inherit some attributes from their # non-abstract parent (unless an ABC comes before it in the # method resolution order). if not hasattr(meta, 'ordering'): new_class._meta.ordering = base_meta.ordering if not hasattr(meta, 'get_latest_by'): new_class._meta.get_latest_by = base_meta.get_latest_by is_proxy = new_class._meta.proxy # If the model is a proxy, ensure that the base class # hasn't been swapped out. if is_proxy and base_meta and base_meta.swapped: raise TypeError("%s cannot proxy the swapped model '%s'." % (name, base_meta.swapped)) # Add remaining attributes (those with a contribute_to_class() method) # to the class. for obj_name, obj in contributable_attrs.items(): new_class.add_to_class(obj_name, obj) # All the fields of any type declared on this model new_fields = chain( new_class._meta.local_fields, new_class._meta.local_many_to_many, new_class._meta.private_fields ) field_names = {f.name for f in new_fields} # Basic setup for proxy models. if is_proxy: base = None for parent in [kls for kls in parents if hasattr(kls, '_meta')]: if parent._meta.abstract: if parent._meta.fields: raise TypeError( "Abstract base class containing model fields not " "permitted for proxy model '%s'." % name ) else: continue if base is None: base = parent elif parent._meta.concrete_model is not base._meta.concrete_model: raise TypeError("Proxy model '%s' has more than one non-abstract model base class." % name) if base is None: raise TypeError("Proxy model '%s' has no non-abstract model base class." % name) new_class._meta.setup_proxy(base) new_class._meta.concrete_model = base._meta.concrete_model else: new_class._meta.concrete_model = new_class # Collect the parent links for multi-table inheritance. parent_links = {} for base in reversed([new_class] + parents): # Conceptually equivalent to `if base is Model`. if not hasattr(base, '_meta'): continue # Skip concrete parent classes. if base != new_class and not base._meta.abstract: continue # Locate OneToOneField instances. for field in base._meta.local_fields: if isinstance(field, OneToOneField): related = resolve_relation(new_class, field.remote_field.model) parent_links[make_model_tuple(related)] = field # Track fields inherited from base models. inherited_attributes = set() # Do the appropriate setup for any model parents. for base in new_class.mro(): if base not in parents or not hasattr(base, '_meta'): # Things without _meta aren't functional models, so they're # uninteresting parents. inherited_attributes.update(base.__dict__) continue parent_fields = base._meta.local_fields + base._meta.local_many_to_many if not base._meta.abstract: # Check for clashes between locally declared fields and those # on the base classes. 
for field in parent_fields: if field.name in field_names: raise FieldError( 'Local field %r in class %r clashes with field of ' 'the same name from base class %r.' % ( field.name, name, base.__name__, ) ) else: inherited_attributes.add(field.name) # Concrete classes... base = base._meta.concrete_model base_key = make_model_tuple(base) if base_key in parent_links: field = parent_links[base_key] elif not is_proxy: attr_name = '%s_ptr' % base._meta.model_name field = OneToOneField( base, on_delete=CASCADE, name=attr_name, auto_created=True, parent_link=True, ) if attr_name in field_names: raise FieldError( "Auto-generated field '%s' in class %r for " "parent_link to base class %r clashes with " "declared field of the same name." % ( attr_name, name, base.__name__, ) ) # Only add the ptr field if it's not already present; # e.g. migrations will already have it specified if not hasattr(new_class, attr_name): new_class.add_to_class(attr_name, field) else: field = None new_class._meta.parents[base] = field else: base_parents = base._meta.parents.copy() # Add fields from abstract base class if it wasn't overridden. for field in parent_fields: if (field.name not in field_names and field.name not in new_class.__dict__ and field.name not in inherited_attributes): new_field = copy.deepcopy(field) new_class.add_to_class(field.name, new_field) # Replace parent links defined on this base by the new # field. It will be appropriately resolved if required. if field.one_to_one: for parent, parent_link in base_parents.items(): if field == parent_link: base_parents[parent] = new_field # Pass any non-abstract parent classes onto child. new_class._meta.parents.update(base_parents) # Inherit private fields (like GenericForeignKey) from the parent # class for field in base._meta.private_fields: if field.name in field_names: if not base._meta.abstract: raise FieldError( 'Local field %r in class %r clashes with field of ' 'the same name from base class %r.' % ( field.name, name, base.__name__, ) ) else: field = copy.deepcopy(field) if not base._meta.abstract: field.mti_inherited = True new_class.add_to_class(field.name, field) # Copy indexes so that index names are unique when models extend an # abstract model. new_class._meta.indexes = [copy.deepcopy(idx) for idx in new_class._meta.indexes] if abstract: # Abstract base models can't be instantiated and don't appear in # the list of models for an app. We do the final setup for them a # little differently from normal models. attr_meta.abstract = False new_class.Meta = attr_meta return new_class new_class._prepare() new_class._meta.apps.register_model(new_class._meta.app_label, new_class) return new_class def add_to_class(cls, name, value): if _has_contribute_to_class(value): value.contribute_to_class(cls, name) else: setattr(cls, name, value) def _prepare(cls): """Create some methods once self._meta has been populated.""" opts = cls._meta opts._prepare(cls) if opts.order_with_respect_to: cls.get_next_in_order = partialmethod(cls._get_next_or_previous_in_order, is_next=True) cls.get_previous_in_order = partialmethod(cls._get_next_or_previous_in_order, is_next=False) # Defer creating accessors on the foreign class until it has been # created and registered. If remote_field is None, we're ordering # with respect to a GenericForeignKey and don't know what the # foreign class is - we'll add those accessors later in # contribute_to_class(). 
            if opts.order_with_respect_to.remote_field:
                wrt = opts.order_with_respect_to
                remote = wrt.remote_field.model
                lazy_related_operation(make_foreign_order_accessors, cls, remote)

        # Give the class a docstring -- its definition.
        if cls.__doc__ is None:
            cls.__doc__ = "%s(%s)" % (cls.__name__, ", ".join(f.name for f in opts.fields))

        get_absolute_url_override = settings.ABSOLUTE_URL_OVERRIDES.get(opts.label_lower)
        if get_absolute_url_override:
            setattr(cls, 'get_absolute_url', get_absolute_url_override)

        if not opts.managers:
            if any(f.name == 'objects' for f in opts.fields):
                raise ValueError(
                    "Model %s must specify a custom Manager, because it has a "
                    "field named 'objects'." % cls.__name__
                )
            manager = Manager()
            manager.auto_created = True
            cls.add_to_class('objects', manager)

        # Set the name of _meta.indexes. This can't be done in
        # Options.contribute_to_class() because fields haven't been added to
        # the model at that point.
        for index in cls._meta.indexes:
            if not index.name:
                index.set_name_with_model(cls)

        class_prepared.send(sender=cls)

    @property
    def _base_manager(cls):
        return cls._meta.base_manager

    @property
    def _default_manager(cls):
        return cls._meta.default_manager


class ModelStateFieldsCacheDescriptor:
    def __get__(self, instance, cls=None):
        if instance is None:
            return self
        res = instance.fields_cache = {}
        return res


class ModelState:
    """Store model instance state."""
    db = None
    # If true, uniqueness validation checks will consider this a new, unsaved
    # object. Necessary for correct validation of new instances of objects with
    # explicit (non-auto) PKs. This impacts validation only; it has no effect
    # on the actual save.
    adding = True
    fields_cache = ModelStateFieldsCacheDescriptor()


class Model(metaclass=ModelBase):

    def __init__(self, *args, **kwargs):
        # Alias some things as locals to avoid repeated global lookups.
        cls = self.__class__
        opts = self._meta
        _setattr = setattr
        _DEFERRED = DEFERRED

        pre_init.send(sender=cls, args=args, kwargs=kwargs)

        # Set up the storage for instance state.
        self._state = ModelState()

        # There is a rather weird disparity here; if kwargs, it's set, then args
        # overrides it. It should be one or the other; don't duplicate the work.
        # The reason for the kwargs check is that the standard iterator passes
        # values in as args, and instantiation for iteration is 33% faster.
        if len(args) > len(opts.concrete_fields):
            # Daft, but matches the old exception sans the err msg.
            raise IndexError("Number of args exceeds number of fields")

        if not kwargs:
            fields_iter = iter(opts.concrete_fields)
            # The ordering of the zip calls matters - zip raises StopIteration
            # when an iter raises it. So if the first iter raises it, the second
            # is *not* consumed. We rely on this, so don't change the order
            # without changing the logic.
            for val, field in zip(args, fields_iter):
                if val is _DEFERRED:
                    continue
                _setattr(self, field.attname, val)
        else:
            # Slower, kwargs-ready version.
            fields_iter = iter(opts.fields)
            for val, field in zip(args, fields_iter):
                if val is _DEFERRED:
                    continue
                _setattr(self, field.attname, val)
                kwargs.pop(field.name, None)

        # Now we're left with the unprocessed fields that *must* come from
        # keywords, or defaults.
        for field in fields_iter:
            is_related_object = False
            # Virtual field
            if field.attname not in kwargs and field.column is None:
                continue
            if kwargs:
                if isinstance(field.remote_field, ForeignObjectRel):
                    try:
                        # Assume object instance was passed in.
                        rel_obj = kwargs.pop(field.name)
                        is_related_object = True
                    except KeyError:
                        try:
                            # Object instance wasn't passed in -- must be an ID.
val = kwargs.pop(field.attname) except KeyError: val = field.get_default() else: # Object instance was passed in. Special case: You can # pass in "None" for related objects if it's allowed. if rel_obj is None and field.null: val = None else: try: val = kwargs.pop(field.attname) except KeyError: # This is done with an exception rather than the # default argument on pop because we don't want # get_default() to be evaluated, and then not used. # Refs #12057. val = field.get_default() else: val = field.get_default() if is_related_object: # If we are passed a related instance, set it using the # field.name instead of field.attname (e.g. "user" instead of # "user_id") so that the object gets properly cached (and type # checked) by the RelatedObjectDescriptor. if rel_obj is not _DEFERRED: _setattr(self, field.name, rel_obj) else: if val is not _DEFERRED: _setattr(self, field.attname, val) if kwargs: property_names = opts._property_names for prop in tuple(kwargs): try: # Any remaining kwargs must correspond to properties or # virtual fields. if prop in property_names or opts.get_field(prop): if kwargs[prop] is not _DEFERRED: _setattr(self, prop, kwargs[prop]) del kwargs[prop] except (AttributeError, FieldDoesNotExist): pass for kwarg in kwargs: raise TypeError("%s() got an unexpected keyword argument '%s'" % (cls.__name__, kwarg)) super().__init__() post_init.send(sender=cls, instance=self) @classmethod def from_db(cls, db, field_names, values): if len(values) != len(cls._meta.concrete_fields): values_iter = iter(values) values = [ next(values_iter) if f.attname in field_names else DEFERRED for f in cls._meta.concrete_fields ] new = cls(*values) new._state.adding = False new._state.db = db return new def __repr__(self): return '<%s: %s>' % (self.__class__.__name__, self) def __str__(self): return '%s object (%s)' % (self.__class__.__name__, self.pk) def __eq__(self, other): if not isinstance(other, Model): return False if self._meta.concrete_model != other._meta.concrete_model: return False my_pk = self.pk if my_pk is None: return self is other return my_pk == other.pk def __hash__(self): if self.pk is None: raise TypeError("Model instances without primary key value are unhashable") return hash(self.pk) def __reduce__(self): data = self.__getstate__() data[DJANGO_VERSION_PICKLE_KEY] = get_version() class_id = self._meta.app_label, self._meta.object_name return model_unpickle, (class_id,), data def __getstate__(self): """Hook to allow choosing the attributes to pickle.""" return self.__dict__ def __setstate__(self, state): msg = None pickled_version = state.get(DJANGO_VERSION_PICKLE_KEY) if pickled_version: current_version = get_version() if current_version != pickled_version: msg = ( "Pickled model instance's Django version %s does not match " "the current version %s." % (pickled_version, current_version) ) else: msg = "Pickled model instance's Django version is not specified." if msg: warnings.warn(msg, RuntimeWarning, stacklevel=2) self.__dict__.update(state) def _get_pk_val(self, meta=None): meta = meta or self._meta return getattr(self, meta.pk.attname) def _set_pk_val(self, value): return setattr(self, self._meta.pk.attname, value) pk = property(_get_pk_val, _set_pk_val) def get_deferred_fields(self): """ Return a set containing names of deferred fields for this instance. """ return { f.attname for f in self._meta.concrete_fields if f.attname not in self.__dict__ } def refresh_from_db(self, using=None, fields=None): """ Reload field values from the database. 
By default, the reloading happens from the database this instance was loaded from, or by the read router if this instance wasn't loaded from any database. The using parameter will override the default. Fields can be used to specify which fields to reload. The fields should be an iterable of field attnames. If fields is None, then all non-deferred fields are reloaded. When accessing deferred fields of an instance, the deferred loading of the field will call this method. """ if fields is None: self._prefetched_objects_cache = {} else: prefetched_objects_cache = getattr(self, '_prefetched_objects_cache', ()) for field in fields: if field in prefetched_objects_cache: del prefetched_objects_cache[field] fields.remove(field) if not fields: return if any(LOOKUP_SEP in f for f in fields): raise ValueError( 'Found "%s" in fields argument. Relations and transforms ' 'are not allowed in fields.' % LOOKUP_SEP) hints = {'instance': self} db_instance_qs = self.__class__._base_manager.db_manager(using, hints=hints).filter(pk=self.pk) # Use provided fields, if not set then reload all non-deferred fields. deferred_fields = self.get_deferred_fields() if fields is not None: fields = list(fields) db_instance_qs = db_instance_qs.only(*fields) elif deferred_fields: fields = [f.attname for f in self._meta.concrete_fields if f.attname not in deferred_fields] db_instance_qs = db_instance_qs.only(*fields) db_instance = db_instance_qs.get() non_loaded_fields = db_instance.get_deferred_fields() for field in self._meta.concrete_fields: if field.attname in non_loaded_fields: # This field wasn't refreshed - skip ahead. continue setattr(self, field.attname, getattr(db_instance, field.attname)) # Clear cached foreign keys. if field.is_relation and field.is_cached(self): field.delete_cached_value(self) # Clear cached relations. for field in self._meta.related_objects: if field.is_cached(self): field.delete_cached_value(self) self._state.db = db_instance._state.db def serializable_value(self, field_name): """ Return the value of the field name for this instance. If the field is a foreign key, return the id value instead of the object. If there's no Field object with this name on the model, return the model attribute's value. Used to serialize a field's value (in the serializer, or form output, for example). Normally, you would just access the attribute directly and not use this method. """ try: field = self._meta.get_field(field_name) except FieldDoesNotExist: return getattr(self, field_name) return getattr(self, field.attname) def save(self, force_insert=False, force_update=False, using=None, update_fields=None): """ Save the current instance. Override this in a subclass if you want to control the saving process. The 'force_insert' and 'force_update' parameters can be used to insist that the "save" must be an SQL insert or update (or equivalent for non-SQL backends), respectively. Normally, they should not be set. """ # Ensure that a model instance without a PK hasn't been assigned to # a ForeignKey or OneToOneField on this model. If the field is # nullable, allowing the save() would result in silent data loss. for field in self._meta.concrete_fields: # If the related field isn't cached, then an instance hasn't # been assigned and there's no need to worry about this check. 
            if field.is_relation and field.is_cached(self):
                obj = getattr(self, field.name, None)
                if not obj:
                    continue
                # A pk may have been assigned manually to a model instance not
                # saved to the database (or auto-generated in a case like
                # UUIDField), but we allow the save to proceed and rely on the
                # database to raise an IntegrityError if applicable. If
                # constraints aren't supported by the database, there's the
                # unavoidable risk of data corruption.
                if obj.pk is None:
                    # Remove the object from a related instance cache.
                    if not field.remote_field.multiple:
                        field.remote_field.delete_cached_value(obj)
                    raise ValueError(
                        "save() prohibited to prevent data loss due to "
                        "unsaved related object '%s'." % field.name
                    )
                elif getattr(self, field.attname) is None:
                    # Use pk from related object if it has been saved after
                    # an assignment.
                    setattr(self, field.attname, obj.pk)
                # If the relationship's pk/to_field was changed, clear the
                # cached relationship.
                if getattr(obj, field.target_field.attname) != getattr(self, field.attname):
                    field.delete_cached_value(self)

        using = using or router.db_for_write(self.__class__, instance=self)
        if force_insert and (force_update or update_fields):
            raise ValueError("Cannot force both insert and updating in model saving.")

        deferred_fields = self.get_deferred_fields()
        if update_fields is not None:
            # If update_fields is empty, skip the save. We also check for
            # no-op saves later on for inheritance cases. This bailout is
            # still needed for skipping signal sending.
            if not update_fields:
                return

            update_fields = frozenset(update_fields)
            field_names = set()

            for field in self._meta.fields:
                if not field.primary_key:
                    field_names.add(field.name)

                    if field.name != field.attname:
                        field_names.add(field.attname)

            non_model_fields = update_fields.difference(field_names)

            if non_model_fields:
                raise ValueError("The following fields do not exist in this "
                                 "model or are m2m fields: %s"
                                 % ', '.join(non_model_fields))

        # If saving to the same database, and this model is deferred, then
        # automatically do an "update_fields" save on the loaded fields.
        elif not force_insert and deferred_fields and using == self._state.db:
            field_names = set()
            for field in self._meta.concrete_fields:
                if not field.primary_key and not hasattr(field, 'through'):
                    field_names.add(field.attname)
            loaded_fields = field_names.difference(deferred_fields)
            if loaded_fields:
                update_fields = frozenset(loaded_fields)

        self.save_base(using=using, force_insert=force_insert,
                       force_update=force_update, update_fields=update_fields)
    save.alters_data = True

    def save_base(self, raw=False, force_insert=False,
                  force_update=False, using=None, update_fields=None):
        """
        Handle the parts of saving which should be done only once per save,
        yet need to be done in raw saves, too. This includes some sanity
        checks and signal sending.

        The 'raw' argument tells save_base not to save any parent models and
        not to make any changes to the values before saving. This is used by
        fixture loading.
        """
        using = using or router.db_for_write(self.__class__, instance=self)
        assert not (force_insert and (force_update or update_fields))
        assert update_fields is None or update_fields
        cls = origin = self.__class__
        # Skip proxies, but keep the origin as the proxy model.
        if cls._meta.proxy:
            cls = cls._meta.concrete_model
        meta = cls._meta
        if not meta.auto_created:
            pre_save.send(
                sender=origin, instance=self, raw=raw, using=using,
                update_fields=update_fields,
            )
        # A transaction isn't needed if one query is issued.
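        # Multi-table inheritance writes one row per concrete parent, so those
        # writes are grouped in an atomic block (without a savepoint); the
        # single-query case below skips the atomic block and merely marks the
        # connection for rollback on error.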
        if meta.parents:
            context_manager = transaction.atomic(using=using, savepoint=False)
        else:
            context_manager = transaction.mark_for_rollback_on_error(using=using)
        with context_manager:
            parent_inserted = False
            if not raw:
                parent_inserted = self._save_parents(cls, using, update_fields)
            updated = self._save_table(
                raw, cls, force_insert or parent_inserted,
                force_update, using, update_fields,
            )
        # Store the database on which the object was saved.
        self._state.db = using
        # Once saved, this is no longer a to-be-added instance.
        self._state.adding = False

        # Signal that the save is complete.
        if not meta.auto_created:
            post_save.send(
                sender=origin, instance=self, created=(not updated),
                update_fields=update_fields, raw=raw, using=using,
            )

    save_base.alters_data = True

    def _save_parents(self, cls, using, update_fields):
        """Save all the parents of cls using values from self."""
        meta = cls._meta
        inserted = False
        for parent, field in meta.parents.items():
            # Make sure the link fields are synced between parent and self.
            if (field and getattr(self, parent._meta.pk.attname) is None and
                    getattr(self, field.attname) is not None):
                setattr(self, parent._meta.pk.attname, getattr(self, field.attname))
            parent_inserted = self._save_parents(cls=parent, using=using, update_fields=update_fields)
            updated = self._save_table(
                cls=parent, using=using, update_fields=update_fields,
                force_insert=parent_inserted,
            )
            if not updated:
                inserted = True
            # Set the parent's PK value to self.
            if field:
                setattr(self, field.attname, self._get_pk_val(parent._meta))
                # Since we didn't have an instance of the parent handy, set
                # attname directly, bypassing the descriptor. Invalidate
                # the related object cache, in case it's been accidentally
                # populated. A fresh instance will be re-built from the
                # database if necessary.
                if field.is_cached(self):
                    field.delete_cached_value(self)
        return inserted

    def _save_table(self, raw=False, cls=None, force_insert=False,
                    force_update=False, using=None, update_fields=None):
        """
        Do the heavy-lifting involved in saving. Update or insert the data
        for a single table.
        """
        meta = cls._meta
        non_pks = [f for f in meta.local_concrete_fields if not f.primary_key]

        if update_fields:
            non_pks = [f for f in non_pks
                       if f.name in update_fields or f.attname in update_fields]

        pk_val = self._get_pk_val(meta)
        if pk_val is None:
            pk_val = meta.pk.get_pk_value_on_save(self)
            setattr(self, meta.pk.attname, pk_val)
        pk_set = pk_val is not None
        if not pk_set and (force_update or update_fields):
            raise ValueError("Cannot force an update in save() with no primary key.")
        updated = False
        # If possible, try an UPDATE. If that doesn't update anything, do an INSERT.
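        # _do_update() reports whether the UPDATE matched an existing row;
        # only when it didn't (or the pk is unset, or force_insert was
        # requested) does control fall through to the INSERT branch, which
        # also backfills an auto-generated pk via _do_insert().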
        if pk_set and not force_insert:
            base_qs = cls._base_manager.using(using)
            values = [(f, None, (getattr(self, f.attname) if raw else f.pre_save(self, False)))
                      for f in non_pks]
            forced_update = update_fields or force_update
            updated = self._do_update(base_qs, using, pk_val, values, update_fields,
                                      forced_update)
            if force_update and not updated:
                raise DatabaseError("Forced update did not affect any rows.")
            if update_fields and not updated:
                raise DatabaseError("Save with update_fields did not affect any rows.")
        if not updated:
            if meta.order_with_respect_to:
                # If this is a model with an order_with_respect_to,
                # autopopulate the _order field.
                field = meta.order_with_respect_to
                filter_args = field.get_filter_kwargs_for_object(self)
                order_value = cls._base_manager.using(using).filter(**filter_args).count()
                self._order = order_value

            fields = meta.local_concrete_fields
            if not pk_set:
                fields = [f for f in fields if f is not meta.auto_field]

            update_pk = meta.auto_field and not pk_set
            result = self._do_insert(cls._base_manager, using, fields, update_pk, raw)
            if update_pk:
                setattr(self, meta.pk.attname, result)
        return updated

    def _do_update(self, base_qs, using, pk_val, values, update_fields, forced_update):
        """
        Try to update the model. Return True if the model was updated (if an
        update query was done and a matching row was found in the DB).
        """
        filtered = base_qs.filter(pk=pk_val)
        if not values:
            # We can end up here when saving a model in an inheritance chain
            # where update_fields doesn't target any field in the current
            # model. In that case we just say the update succeeded. Another
            # case ending up here is a model with just a PK - in that case,
            # check that the PK still exists.
            return update_fields is not None or filtered.exists()
        if self._meta.select_on_save and not forced_update:
            return (
                filtered.exists() and
                # It may happen that the object is deleted from the DB right after
                # this check, causing the subsequent UPDATE to return zero matching
                # rows. The same result can occur in some rare cases when the
                # database returns zero despite the UPDATE being executed
                # successfully (a row is matched and updated). In order to
                # distinguish these two cases, the object's existence in the
                # database is checked again if the UPDATE query returns 0.
                (filtered._update(values) > 0 or filtered.exists())
            )
        return filtered._update(values) > 0

    def _do_insert(self, manager, using, fields, update_pk, raw):
        """
        Do an INSERT. If update_pk is defined then this method should return
        the new pk for the model.
        """
        return manager._insert([self], fields=fields, return_id=update_pk,
                               using=using, raw=raw)

    def delete(self, using=None, keep_parents=False):
        using = using or router.db_for_write(self.__class__, instance=self)
        assert self.pk is not None, (
            "%s object can't be deleted because its %s attribute is set to None." %
            (self._meta.object_name, self._meta.pk.attname)
        )

        collector = Collector(using=using)
        collector.collect([self], keep_parents=keep_parents)
        return collector.delete()

    delete.alters_data = True

    def _get_FIELD_display(self, field):
        value = getattr(self, field.attname)
        # force_str() to coerce lazy strings.
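        # flatchoices flattens optgroup-style (nested) choices into a single
        # value -> label sequence; strings_only=True leaves non-string values
        # such as None or integers untouched.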
return force_str(dict(field.flatchoices).get(value, value), strings_only=True) def _get_next_or_previous_by_FIELD(self, field, is_next, **kwargs): if not self.pk: raise ValueError("get_next/get_previous cannot be used on unsaved objects.") op = 'gt' if is_next else 'lt' order = '' if is_next else '-' param = getattr(self, field.attname) q = Q(**{'%s__%s' % (field.name, op): param}) q = q | Q(**{field.name: param, 'pk__%s' % op: self.pk}) qs = self.__class__._default_manager.using(self._state.db).filter(**kwargs).filter(q).order_by( '%s%s' % (order, field.name), '%spk' % order ) try: return qs[0] except IndexError: raise self.DoesNotExist("%s matching query does not exist." % self.__class__._meta.object_name) def _get_next_or_previous_in_order(self, is_next): cachename = "__%s_order_cache" % is_next if not hasattr(self, cachename): op = 'gt' if is_next else 'lt' order = '_order' if is_next else '-_order' order_field = self._meta.order_with_respect_to filter_args = order_field.get_filter_kwargs_for_object(self) obj = self.__class__._default_manager.filter(**filter_args).filter(**{ '_order__%s' % op: self.__class__._default_manager.values('_order').filter(**{ self._meta.pk.name: self.pk }) }).order_by(order)[:1].get() setattr(self, cachename, obj) return getattr(self, cachename) def prepare_database_save(self, field): if self.pk is None: raise ValueError("Unsaved model instance %r cannot be used in an ORM query." % self) return getattr(self, field.remote_field.get_related_field().attname) def clean(self): """ Hook for doing any extra model-wide validation after clean() has been called on every field by self.clean_fields. Any ValidationError raised by this method will not be associated with a particular field; it will have a special-case association with the field defined by NON_FIELD_ERRORS. """ pass def validate_unique(self, exclude=None): """ Check unique constraints on the model and raise ValidationError if any failed. """ unique_checks, date_checks = self._get_unique_checks(exclude=exclude) errors = self._perform_unique_checks(unique_checks) date_errors = self._perform_date_checks(date_checks) for k, v in date_errors.items(): errors.setdefault(k, []).extend(v) if errors: raise ValidationError(errors) def _get_unique_checks(self, exclude=None): """ Return a list of checks to perform. Since validate_unique() could be called from a ModelForm, some fields may have been excluded; we can't perform a unique check on a model that is missing fields involved in that check. Fields that did not validate should also be excluded, but they need to be passed in via the exclude argument. """ if exclude is None: exclude = [] unique_checks = [] unique_togethers = [(self.__class__, self._meta.unique_together)] constraints = [(self.__class__, self._meta.constraints)] for parent_class in self._meta.get_parent_list(): if parent_class._meta.unique_together: unique_togethers.append((parent_class, parent_class._meta.unique_together)) if parent_class._meta.constraints: constraints.append((parent_class, parent_class._meta.constraints)) for model_class, unique_together in unique_togethers: for check in unique_together: if not any(name in exclude for name in check): # Add the check if the field isn't excluded. unique_checks.append((model_class, tuple(check))) for model_class, model_constraints in constraints: for constraint in model_constraints: if (isinstance(constraint, UniqueConstraint) and # Partial unique constraints can't be validated. 
                        constraint.condition is None and
                        not any(name in exclude for name in constraint.fields)):
                    unique_checks.append((model_class, constraint.fields))

        # These are checks for the unique_for_<date/year/month>.
        date_checks = []

        # Gather a list of checks for fields declared as unique and add them to
        # the list of checks.

        fields_with_class = [(self.__class__, self._meta.local_fields)]
        for parent_class in self._meta.get_parent_list():
            fields_with_class.append((parent_class, parent_class._meta.local_fields))

        for model_class, fields in fields_with_class:
            for f in fields:
                name = f.name
                if name in exclude:
                    continue
                if f.unique:
                    unique_checks.append((model_class, (name,)))
                if f.unique_for_date and f.unique_for_date not in exclude:
                    date_checks.append((model_class, 'date', name, f.unique_for_date))
                if f.unique_for_year and f.unique_for_year not in exclude:
                    date_checks.append((model_class, 'year', name, f.unique_for_year))
                if f.unique_for_month and f.unique_for_month not in exclude:
                    date_checks.append((model_class, 'month', name, f.unique_for_month))
        return unique_checks, date_checks

    def _perform_unique_checks(self, unique_checks):
        errors = {}

        for model_class, unique_check in unique_checks:
            # Try to look up an existing object with the same values as this
            # object's values for all the unique fields.
            lookup_kwargs = {}
            for field_name in unique_check:
                f = self._meta.get_field(field_name)
                lookup_value = getattr(self, f.attname)
                # TODO: Handle multiple backends with different feature flags.
                if (lookup_value is None or
                        (lookup_value == '' and connection.features.interprets_empty_strings_as_nulls)):
                    # no value, skip the lookup
                    continue
                if f.primary_key and not self._state.adding:
                    # no need to check for unique primary key when editing
                    continue
                lookup_kwargs[str(field_name)] = lookup_value

            # some fields were skipped, no reason to do the check
            if len(unique_check) != len(lookup_kwargs):
                continue

            qs = model_class._default_manager.filter(**lookup_kwargs)

            # Exclude the current object from the query if we are editing an
            # instance (as opposed to creating a new one)
            # Note that we need to use the pk as defined by model_class, not
            # self.pk. These can be different fields because model inheritance
            # allows a single model to have effectively multiple primary keys.
            # Refs #17615.
            model_class_pk = self._get_pk_val(model_class._meta)
            if not self._state.adding and model_class_pk is not None:
                qs = qs.exclude(pk=model_class_pk)
            if qs.exists():
                if len(unique_check) == 1:
                    key = unique_check[0]
                else:
                    key = NON_FIELD_ERRORS
                errors.setdefault(key, []).append(self.unique_error_message(model_class, unique_check))

        return errors

    def _perform_date_checks(self, date_checks):
        errors = {}
        for model_class, lookup_type, field, unique_for in date_checks:
            lookup_kwargs = {}
            # There's a ticket to add a date lookup; we can remove this special
            # case if that makes its way in.
            date = getattr(self, unique_for)
            if date is None:
                continue
            if lookup_type == 'date':
                lookup_kwargs['%s__day' % unique_for] = date.day
                lookup_kwargs['%s__month' % unique_for] = date.month
                lookup_kwargs['%s__year' % unique_for] = date.year
            else:
                lookup_kwargs['%s__%s' % (unique_for, lookup_type)] = getattr(date, lookup_type)
            lookup_kwargs[field] = getattr(self, field)

            qs = model_class._default_manager.filter(**lookup_kwargs)
            # Exclude the current object from the query if we are editing an
            # instance (as opposed to creating a new one)
            if not self._state.adding and self.pk is not None:
                qs = qs.exclude(pk=self.pk)

            if qs.exists():
                errors.setdefault(field, []).append(
                    self.date_error_message(lookup_type, field, unique_for)
                )
        return errors

    def date_error_message(self, lookup_type, field_name, unique_for):
        opts = self._meta
        field = opts.get_field(field_name)
        return ValidationError(
            message=field.error_messages['unique_for_date'],
            code='unique_for_date',
            params={
                'model': self,
                'model_name': capfirst(opts.verbose_name),
                'lookup_type': lookup_type,
                'field': field_name,
                'field_label': capfirst(field.verbose_name),
                'date_field': unique_for,
                'date_field_label': capfirst(opts.get_field(unique_for).verbose_name),
            }
        )

    def unique_error_message(self, model_class, unique_check):
        opts = model_class._meta

        params = {
            'model': self,
            'model_class': model_class,
            'model_name': capfirst(opts.verbose_name),
            'unique_check': unique_check,
        }

        # A unique field
        if len(unique_check) == 1:
            field = opts.get_field(unique_check[0])
            params['field_label'] = capfirst(field.verbose_name)
            return ValidationError(
                message=field.error_messages['unique'],
                code='unique',
                params=params,
            )

        # unique_together
        else:
            field_labels = [capfirst(opts.get_field(f).verbose_name) for f in unique_check]
            params['field_labels'] = get_text_list(field_labels, _('and'))
            return ValidationError(
                message=_("%(model_name)s with this %(field_labels)s already exists."),
                code='unique_together',
                params=params,
            )

    def full_clean(self, exclude=None, validate_unique=True):
        """
        Call clean_fields(), clean(), and validate_unique() on the model.
        Raise a ValidationError for any errors that occur.
        """
        errors = {}
        if exclude is None:
            exclude = []
        else:
            exclude = list(exclude)

        try:
            self.clean_fields(exclude=exclude)
        except ValidationError as e:
            errors = e.update_error_dict(errors)

        # Form.clean() is run even if other validation fails, so do the
        # same with Model.clean() for consistency.
        try:
            self.clean()
        except ValidationError as e:
            errors = e.update_error_dict(errors)

        # Run unique checks, but only for fields that passed validation.
if validate_unique: for name in errors: if name != NON_FIELD_ERRORS and name not in exclude: exclude.append(name) try: self.validate_unique(exclude=exclude) except ValidationError as e: errors = e.update_error_dict(errors) if errors: raise ValidationError(errors) def clean_fields(self, exclude=None): """ Clean all fields and raise a ValidationError containing a dict of all validation errors if any occur. """ if exclude is None: exclude = [] errors = {} for f in self._meta.fields: if f.name in exclude: continue # Skip validation for empty fields with blank=True. The developer # is responsible for making sure they have a valid value. raw_value = getattr(self, f.attname) if f.blank and raw_value in f.empty_values: continue try: setattr(self, f.attname, f.clean(raw_value, self)) except ValidationError as e: errors[f.name] = e.error_list if errors: raise ValidationError(errors) @classmethod def check(cls, **kwargs): errors = [*cls._check_swappable(), *cls._check_model(), *cls._check_managers(**kwargs)] if not cls._meta.swapped: errors += [ *cls._check_fields(**kwargs), *cls._check_m2m_through_same_relationship(), *cls._check_long_column_names(), ] clash_errors = ( *cls._check_id_field(), *cls._check_field_name_clashes(), *cls._check_model_name_db_lookup_clashes(), *cls._check_property_name_related_field_accessor_clashes(), *cls._check_single_primary_key(), ) errors.extend(clash_errors) # If there are field name clashes, hide consequent column name # clashes. if not clash_errors: errors.extend(cls._check_column_name_clashes()) errors += [ *cls._check_index_together(), *cls._check_unique_together(), *cls._check_indexes(), *cls._check_ordering(), *cls._check_constraints(), ] return errors @classmethod def _check_swappable(cls): """Check if the swapped model exists.""" errors = [] if cls._meta.swapped: try: apps.get_model(cls._meta.swapped) except ValueError: errors.append( checks.Error( "'%s' is not of the form 'app_label.app_name'." % cls._meta.swappable, id='models.E001', ) ) except LookupError: app_label, model_name = cls._meta.swapped.split('.') errors.append( checks.Error( "'%s' references '%s.%s', which has not been " "installed, or is abstract." % ( cls._meta.swappable, app_label, model_name ), id='models.E002', ) ) return errors @classmethod def _check_model(cls): errors = [] if cls._meta.proxy: if cls._meta.local_fields or cls._meta.local_many_to_many: errors.append( checks.Error( "Proxy model '%s' contains model fields." % cls.__name__, id='models.E017', ) ) return errors @classmethod def _check_managers(cls, **kwargs): """Perform all manager checks.""" errors = [] for manager in cls._meta.managers: errors.extend(manager.check(**kwargs)) return errors @classmethod def _check_fields(cls, **kwargs): """Perform all field checks.""" errors = [] for field in cls._meta.local_fields: errors.extend(field.check(**kwargs)) for field in cls._meta.local_many_to_many: errors.extend(field.check(from_model=cls, **kwargs)) return errors @classmethod def _check_m2m_through_same_relationship(cls): """ Check if no relationship model is used by more than one m2m field. """ errors = [] seen_intermediary_signatures = [] fields = cls._meta.local_many_to_many # Skip when the target model wasn't found. fields = (f for f in fields if isinstance(f.remote_field.model, ModelBase)) # Skip when the relationship model wasn't found. 
fields = (f for f in fields if isinstance(f.remote_field.through, ModelBase)) for f in fields: signature = (f.remote_field.model, cls, f.remote_field.through, f.remote_field.through_fields) if signature in seen_intermediary_signatures: errors.append( checks.Error( "The model has two identical many-to-many relations " "through the intermediate model '%s'." % f.remote_field.through._meta.label, obj=cls, id='models.E003', ) ) else: seen_intermediary_signatures.append(signature) return errors @classmethod def _check_id_field(cls): """Check if `id` field is a primary key.""" fields = [f for f in cls._meta.local_fields if f.name == 'id' and f != cls._meta.pk] # fields is empty or consists of the invalid "id" field if fields and not fields[0].primary_key and cls._meta.pk.name == 'id': return [ checks.Error( "'id' can only be used as a field name if the field also " "sets 'primary_key=True'.", obj=cls, id='models.E004', ) ] else: return [] @classmethod def _check_field_name_clashes(cls): """Forbid field shadowing in multi-table inheritance.""" errors = [] used_fields = {} # name or attname -> field # Check that multi-inheritance doesn't cause field name shadowing. for parent in cls._meta.get_parent_list(): for f in parent._meta.local_fields: clash = used_fields.get(f.name) or used_fields.get(f.attname) or None if clash: errors.append( checks.Error( "The field '%s' from parent model " "'%s' clashes with the field '%s' " "from parent model '%s'." % ( clash.name, clash.model._meta, f.name, f.model._meta ), obj=cls, id='models.E005', ) ) used_fields[f.name] = f used_fields[f.attname] = f # Check that fields defined in the model don't clash with fields from # parents, including auto-generated fields like multi-table inheritance # child accessors. for parent in cls._meta.get_parent_list(): for f in parent._meta.get_fields(): if f not in used_fields: used_fields[f.name] = f for f in cls._meta.local_fields: clash = used_fields.get(f.name) or used_fields.get(f.attname) or None # Note that we may detect clash between user-defined non-unique # field "id" and automatically added unique field "id", both # defined at the same model. This special case is considered in # _check_id_field and here we ignore it. id_conflict = f.name == "id" and clash and clash.name == "id" and clash.model == cls if clash and not id_conflict: errors.append( checks.Error( "The field '%s' clashes with the field '%s' " "from model '%s'." % ( f.name, clash.name, clash.model._meta ), obj=f, id='models.E006', ) ) used_fields[f.name] = f used_fields[f.attname] = f return errors @classmethod def _check_column_name_clashes(cls): # Store a list of column names which have already been used by other fields. used_column_names = [] errors = [] for f in cls._meta.local_fields: _, column_name = f.get_attname_column() # Ensure the column name is not already in use. if column_name and column_name in used_column_names: errors.append( checks.Error( "Field '%s' has column name '%s' that is used by " "another field." % (f.name, column_name), hint="Specify a 'db_column' for the field.", obj=cls, id='models.E007' ) ) else: used_column_names.append(column_name) return errors @classmethod def _check_model_name_db_lookup_clashes(cls): errors = [] model_name = cls.__name__ if model_name.startswith('_') or model_name.endswith('_'): errors.append( checks.Error( "The model name '%s' cannot start or end with an underscore " "as it collides with the query lookup syntax." 
% model_name, obj=cls, id='models.E023' ) ) elif LOOKUP_SEP in model_name: errors.append( checks.Error( "The model name '%s' cannot contain double underscores as " "it collides with the query lookup syntax." % model_name, obj=cls, id='models.E024' ) ) return errors @classmethod def _check_property_name_related_field_accessor_clashes(cls): errors = [] property_names = cls._meta._property_names related_field_accessors = ( f.get_attname() for f in cls._meta._get_fields(reverse=False) if f.is_relation and f.related_model is not None ) for accessor in related_field_accessors: if accessor in property_names: errors.append( checks.Error( "The property '%s' clashes with a related field " "accessor." % accessor, obj=cls, id='models.E025', ) ) return errors @classmethod def _check_single_primary_key(cls): errors = [] if sum(1 for f in cls._meta.local_fields if f.primary_key) > 1: errors.append( checks.Error( "The model cannot have more than one field with " "'primary_key=True'.", obj=cls, id='models.E026', ) ) return errors @classmethod def _check_index_together(cls): """Check the value of "index_together" option.""" if not isinstance(cls._meta.index_together, (tuple, list)): return [ checks.Error( "'index_together' must be a list or tuple.", obj=cls, id='models.E008', ) ] elif any(not isinstance(fields, (tuple, list)) for fields in cls._meta.index_together): return [ checks.Error( "All 'index_together' elements must be lists or tuples.", obj=cls, id='models.E009', ) ] else: errors = [] for fields in cls._meta.index_together: errors.extend(cls._check_local_fields(fields, "index_together")) return errors @classmethod def _check_unique_together(cls): """Check the value of "unique_together" option.""" if not isinstance(cls._meta.unique_together, (tuple, list)): return [ checks.Error( "'unique_together' must be a list or tuple.", obj=cls, id='models.E010', ) ] elif any(not isinstance(fields, (tuple, list)) for fields in cls._meta.unique_together): return [ checks.Error( "All 'unique_together' elements must be lists or tuples.", obj=cls, id='models.E011', ) ] else: errors = [] for fields in cls._meta.unique_together: errors.extend(cls._check_local_fields(fields, "unique_together")) return errors @classmethod def _check_indexes(cls): """Check the fields and names of indexes.""" errors = [] for index in cls._meta.indexes: # Index name can't start with an underscore or a number, restricted # for cross-database compatibility with Oracle. if index.name[0] == '_' or index.name[0].isdigit(): errors.append( checks.Error( "The index name '%s' cannot start with an underscore " "or a number." % index.name, obj=cls, id='models.E033', ), ) if len(index.name) > index.max_name_length: errors.append( checks.Error( "The index name '%s' cannot be longer than %d " "characters." 
% (index.name, index.max_name_length), obj=cls, id='models.E034', ), ) fields = [field for index in cls._meta.indexes for field, _ in index.fields_orders] errors.extend(cls._check_local_fields(fields, 'indexes')) return errors @classmethod def _check_local_fields(cls, fields, option): from django.db import models # In order to avoid hitting the relation tree prematurely, we use our # own fields_map instead of using get_field() forward_fields_map = {} for field in cls._meta._get_fields(reverse=False): forward_fields_map[field.name] = field if hasattr(field, 'attname'): forward_fields_map[field.attname] = field errors = [] for field_name in fields: try: field = forward_fields_map[field_name] except KeyError: errors.append( checks.Error( "'%s' refers to the nonexistent field '%s'." % ( option, field_name, ), obj=cls, id='models.E012', ) ) else: if isinstance(field.remote_field, models.ManyToManyRel): errors.append( checks.Error( "'%s' refers to a ManyToManyField '%s', but " "ManyToManyFields are not permitted in '%s'." % ( option, field_name, option, ), obj=cls, id='models.E013', ) ) elif field not in cls._meta.local_fields: errors.append( checks.Error( "'%s' refers to field '%s' which is not local to model '%s'." % (option, field_name, cls._meta.object_name), hint="This issue may be caused by multi-table inheritance.", obj=cls, id='models.E016', ) ) return errors @classmethod def _check_ordering(cls): """ Check "ordering" option -- is it a list of strings and do all fields exist? """ if cls._meta._ordering_clash: return [ checks.Error( "'ordering' and 'order_with_respect_to' cannot be used together.", obj=cls, id='models.E021', ), ] if cls._meta.order_with_respect_to or not cls._meta.ordering: return [] if not isinstance(cls._meta.ordering, (list, tuple)): return [ checks.Error( "'ordering' must be a tuple or list (even if you want to order by only one field).", obj=cls, id='models.E014', ) ] errors = [] fields = cls._meta.ordering # Skip expressions and '?' fields. fields = (f for f in fields if isinstance(f, str) and f != '?') # Convert "-field" to "field". fields = ((f[1:] if f.startswith('-') else f) for f in fields) # Separate related fields and non-related fields. _fields = [] related_fields = [] for f in fields: if LOOKUP_SEP in f: related_fields.append(f) else: _fields.append(f) fields = _fields # Check related fields. for field in related_fields: _cls = cls fld = None for part in field.split(LOOKUP_SEP): try: fld = _cls._meta.get_field(part) if fld.is_relation: _cls = fld.get_path_info()[-1].to_opts.model except (FieldDoesNotExist, AttributeError): if fld is None or fld.get_transform(part) is None: errors.append( checks.Error( "'ordering' refers to the nonexistent field, " "related field, or lookup '%s'." % field, obj=cls, id='models.E015', ) ) # Skip ordering on pk. This is always a valid order_by field # but is an alias and therefore won't be found by opts.get_field. fields = {f for f in fields if f != 'pk'} # Check for invalid or nonexistent fields in ordering. invalid_fields = [] # Any field name that is not present in field_names does not exist. # Also, ordering by m2m fields is not allowed. 
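        # valid_fields below gathers every name usable in Meta.ordering: each
        # concrete field's name and attname, plus related_query_name() for
        # auto-created reverse relations.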
opts = cls._meta valid_fields = set(chain.from_iterable( (f.name, f.attname) if not (f.auto_created and not f.concrete) else (f.field.related_query_name(),) for f in chain(opts.fields, opts.related_objects) )) invalid_fields.extend(fields - valid_fields) for invalid_field in invalid_fields: errors.append( checks.Error( "'ordering' refers to the nonexistent field, related " "field, or lookup '%s'." % invalid_field, obj=cls, id='models.E015', ) ) return errors @classmethod def _check_long_column_names(cls): """ Check that any auto-generated column names are shorter than the limits for each database in which the model will be created. """ errors = [] allowed_len = None db_alias = None # Find the minimum max allowed length among all specified db_aliases. for db in settings.DATABASES: # skip databases where the model won't be created if not router.allow_migrate_model(db, cls): continue connection = connections[db] max_name_length = connection.ops.max_name_length() if max_name_length is None or connection.features.truncates_names: continue else: if allowed_len is None: allowed_len = max_name_length db_alias = db elif max_name_length < allowed_len: allowed_len = max_name_length db_alias = db if allowed_len is None: return errors for f in cls._meta.local_fields: _, column_name = f.get_attname_column() # Check if auto-generated name for the field is too long # for the database. if f.db_column is None and column_name is not None and len(column_name) > allowed_len: errors.append( checks.Error( 'Autogenerated column name too long for field "%s". ' 'Maximum length is "%s" for database "%s".' % (column_name, allowed_len, db_alias), hint="Set the column name manually using 'db_column'.", obj=cls, id='models.E018', ) ) for f in cls._meta.local_many_to_many: # Skip nonexistent models. if isinstance(f.remote_field.through, str): continue # Check if auto-generated name for the M2M field is too long # for the database. for m2m in f.remote_field.through._meta.local_fields: _, rel_name = m2m.get_attname_column() if m2m.db_column is None and rel_name is not None and len(rel_name) > allowed_len: errors.append( checks.Error( 'Autogenerated column name too long for M2M field ' '"%s". Maximum length is "%s" for database "%s".' % (rel_name, allowed_len, db_alias), hint=( "Use 'through' to create a separate model for " "M2M and then set column_name using 'db_column'." ), obj=cls, id='models.E019', ) ) return errors @classmethod def _check_constraints(cls): errors = [] for db in settings.DATABASES: if not router.allow_migrate_model(db, cls): continue connection = connections[db] if connection.features.supports_table_check_constraints: continue if any(isinstance(constraint, CheckConstraint) for constraint in cls._meta.constraints): errors.append( checks.Warning( '%s does not support check constraints.' % connection.display_name, hint=( "A constraint won't be created. Silence this " "warning if you don't care about it." 
), obj=cls, id='models.W027', ) ) return errors ############################################ # HELPER FUNCTIONS (CURRIED MODEL METHODS) # ############################################ # ORDERING METHODS ######################### def method_set_order(self, ordered_obj, id_list, using=None): if using is None: using = DEFAULT_DB_ALIAS order_wrt = ordered_obj._meta.order_with_respect_to filter_args = order_wrt.get_forward_related_filter(self) ordered_obj.objects.db_manager(using).filter(**filter_args).bulk_update([ ordered_obj(pk=pk, _order=order) for order, pk in enumerate(id_list) ], ['_order']) def method_get_order(self, ordered_obj): order_wrt = ordered_obj._meta.order_with_respect_to filter_args = order_wrt.get_forward_related_filter(self) pk_name = ordered_obj._meta.pk.name return ordered_obj.objects.filter(**filter_args).values_list(pk_name, flat=True) def make_foreign_order_accessors(model, related_model): setattr( related_model, 'get_%s_order' % model.__name__.lower(), partialmethod(method_get_order, model) ) setattr( related_model, 'set_%s_order' % model.__name__.lower(), partialmethod(method_set_order, model) ) ######## # MISC # ######## def model_unpickle(model_id): """Used to unpickle Model subclasses with deferred fields.""" if isinstance(model_id, tuple): model = apps.get_model(*model_id) else: # Backwards compat - the model was cached directly in earlier versions. model = model_id return model.__new__(model) model_unpickle.__safe_for_unpickle__ = True
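# A minimal usage sketch (illustrative only, not part of this module): how the
# ordering helpers above end up on a model that uses Meta.order_with_respect_to.
# The model names 'Question' and 'Choice' are hypothetical.
#
#     class Question(models.Model):
#         text = models.CharField(max_length=100)
#
#     class Choice(models.Model):
#         question = models.ForeignKey(Question, on_delete=models.CASCADE)
#
#         class Meta:
#             order_with_respect_to = 'question'
#
#     question = Question.objects.create(text='...')
#     # ModelBase._prepare() scheduled make_foreign_order_accessors(), which
#     # attached both of these methods to Question:
#     question.get_choice_order()           # method_get_order(): ordered pks
#     question.set_choice_order([3, 1, 2])  # method_set_order(): bulk_update()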
import copy import datetime import inspect from decimal import Decimal from django.core.exceptions import EmptyResultSet, FieldError from django.db import connection from django.db.models import fields from django.db.models.query_utils import Q from django.db.utils import NotSupportedError from django.utils.deconstruct import deconstructible from django.utils.functional import cached_property from django.utils.hashable import make_hashable class SQLiteNumericMixin: """ Some expressions with output_field=DecimalField() must be cast to numeric to be properly filtered. """ def as_sqlite(self, compiler, connection, **extra_context): sql, params = self.as_sql(compiler, connection, **extra_context) try: if self.output_field.get_internal_type() == 'DecimalField': sql = 'CAST(%s AS NUMERIC)' % sql except FieldError: pass return sql, params class Combinable: """ Provide the ability to combine one or two objects with some connector. For example F('foo') + F('bar'). """ # Arithmetic connectors ADD = '+' SUB = '-' MUL = '*' DIV = '/' POW = '^' # The following is a quoted % operator - it is quoted because it can be # used in strings that also have parameter substitution. MOD = '%%' # Bitwise operators - note that these are generated by .bitand() # and .bitor(), the '&' and '|' are reserved for boolean operator # usage. BITAND = '&' BITOR = '|' BITLEFTSHIFT = '<<' BITRIGHTSHIFT = '>>' def _combine(self, other, connector, reversed): if not hasattr(other, 'resolve_expression'): # everything must be resolvable to an expression if isinstance(other, datetime.timedelta): other = DurationValue(other, output_field=fields.DurationField()) else: other = Value(other) if reversed: return CombinedExpression(other, connector, self) return CombinedExpression(self, connector, other) ############# # OPERATORS # ############# def __neg__(self): return self._combine(-1, self.MUL, False) def __add__(self, other): return self._combine(other, self.ADD, False) def __sub__(self, other): return self._combine(other, self.SUB, False) def __mul__(self, other): return self._combine(other, self.MUL, False) def __truediv__(self, other): return self._combine(other, self.DIV, False) def __mod__(self, other): return self._combine(other, self.MOD, False) def __pow__(self, other): return self._combine(other, self.POW, False) def __and__(self, other): raise NotImplementedError( "Use .bitand() and .bitor() for bitwise logical operations." ) def bitand(self, other): return self._combine(other, self.BITAND, False) def bitleftshift(self, other): return self._combine(other, self.BITLEFTSHIFT, False) def bitrightshift(self, other): return self._combine(other, self.BITRIGHTSHIFT, False) def __or__(self, other): raise NotImplementedError( "Use .bitand() and .bitor() for bitwise logical operations." ) def bitor(self, other): return self._combine(other, self.BITOR, False) def __radd__(self, other): return self._combine(other, self.ADD, True) def __rsub__(self, other): return self._combine(other, self.SUB, True) def __rmul__(self, other): return self._combine(other, self.MUL, True) def __rtruediv__(self, other): return self._combine(other, self.DIV, True) def __rmod__(self, other): return self._combine(other, self.MOD, True) def __rpow__(self, other): return self._combine(other, self.POW, True) def __rand__(self, other): raise NotImplementedError( "Use .bitand() and .bitor() for bitwise logical operations." ) def __ror__(self, other): raise NotImplementedError( "Use .bitand() and .bitor() for bitwise logical operations." 
        )


@deconstructible
class BaseExpression:
    """Base class for all query expressions."""

    # Aggregate-specific fields.
    is_summary = False
    _output_field_resolved_to_none = False
    # Can the expression be used in a WHERE clause?
    filterable = True
    # Can the expression be used as a source expression in Window?
    window_compatible = False

    def __init__(self, output_field=None):
        if output_field is not None:
            self.output_field = output_field

    def __getstate__(self):
        state = self.__dict__.copy()
        state.pop('convert_value', None)
        return state

    def get_db_converters(self, connection):
        return (
            []
            if self.convert_value is self._convert_value_noop else
            [self.convert_value]
        ) + self.output_field.get_db_converters(connection)

    def get_source_expressions(self):
        return []

    def set_source_expressions(self, exprs):
        assert not exprs

    def _parse_expressions(self, *expressions):
        return [
            arg if hasattr(arg, 'resolve_expression') else (
                F(arg) if isinstance(arg, str) else Value(arg)
            ) for arg in expressions
        ]

    def as_sql(self, compiler, connection):
        """
        Responsible for returning a (sql, [params]) tuple to be included
        in the current query.

        Different backends can provide their own implementation, by
        providing an `as_{vendor}` method and patching the Expression:

        ```
        def override_as_sql(self, compiler, connection):
            # custom logic
            return super().as_sql(compiler, connection)
        setattr(Expression, 'as_' + connection.vendor, override_as_sql)
        ```

        Arguments:
         * compiler: the query compiler responsible for generating the query.
           Must have a compile method, returning a (sql, [params]) tuple.
           Calling compiler(value) will return a quoted `value`.

         * connection: the database connection used for the current query.

        Return: (sql, params)
          Where `sql` is a string containing ordered sql parameters to be
          replaced with the elements of the list `params`.
        """
        raise NotImplementedError("Subclasses must implement as_sql()")

    @cached_property
    def contains_aggregate(self):
        return any(expr and expr.contains_aggregate for expr in self.get_source_expressions())

    @cached_property
    def contains_over_clause(self):
        return any(expr and expr.contains_over_clause for expr in self.get_source_expressions())

    @cached_property
    def contains_column_references(self):
        return any(expr and expr.contains_column_references for expr in self.get_source_expressions())

    def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
        """
        Provide the chance to do any preprocessing or validation before being
        added to the query.

        Arguments:
         * query: the backend query implementation
         * allow_joins: boolean allowing or denying use of joins
           in this query
         * reuse: a set of reusable joins for multijoins
         * summarize: a terminal aggregate clause
         * for_save: whether this expression is about to be used in a save
           or update

        Return: an Expression to be added to the query.
""" c = self.copy() c.is_summary = summarize c.set_source_expressions([ expr.resolve_expression(query, allow_joins, reuse, summarize) if expr else None for expr in c.get_source_expressions() ]) return c @property def field(self): return self.output_field @cached_property def output_field(self): """Return the output type of this expressions.""" output_field = self._resolve_output_field() if output_field is None: self._output_field_resolved_to_none = True raise FieldError('Cannot resolve expression type, unknown output_field') return output_field @cached_property def _output_field_or_none(self): """ Return the output field of this expression, or None if _resolve_output_field() didn't return an output type. """ try: return self.output_field except FieldError: if not self._output_field_resolved_to_none: raise def _resolve_output_field(self): """ Attempt to infer the output type of the expression. If the output fields of all source fields match then, simply infer the same type here. This isn't always correct, but it makes sense most of the time. Consider the difference between `2 + 2` and `2 / 3`. Inferring the type here is a convenience for the common case. The user should supply their own output_field with more complex computations. If a source's output field resolves to None, exclude it from this check. If all sources are None, then an error is raised higher up the stack in the output_field property. """ sources_iter = (source for source in self.get_source_fields() if source is not None) for output_field in sources_iter: for source in sources_iter: if not isinstance(output_field, source.__class__): raise FieldError( 'Expression contains mixed types: %s, %s. You must ' 'set output_field.' % ( output_field.__class__.__name__, source.__class__.__name__, ) ) return output_field @staticmethod def _convert_value_noop(value, expression, connection): return value @cached_property def convert_value(self): """ Expressions provide their own converters because users have the option of manually specifying the output_field which may be a different type from the one the database returns. 
""" field = self.output_field internal_type = field.get_internal_type() if internal_type == 'FloatField': return lambda value, expression, connection: None if value is None else float(value) elif internal_type.endswith('IntegerField'): return lambda value, expression, connection: None if value is None else int(value) elif internal_type == 'DecimalField': return lambda value, expression, connection: None if value is None else Decimal(value) return self._convert_value_noop def get_lookup(self, lookup): return self.output_field.get_lookup(lookup) def get_transform(self, name): return self.output_field.get_transform(name) def relabeled_clone(self, change_map): clone = self.copy() clone.set_source_expressions([ e.relabeled_clone(change_map) if e is not None else None for e in self.get_source_expressions() ]) return clone def copy(self): return copy.copy(self) def get_group_by_cols(self, alias=None): if not self.contains_aggregate: return [self] cols = [] for source in self.get_source_expressions(): cols.extend(source.get_group_by_cols()) return cols def get_source_fields(self): """Return the underlying field types used by this aggregate.""" return [e._output_field_or_none for e in self.get_source_expressions()] def asc(self, **kwargs): return OrderBy(self, **kwargs) def desc(self, **kwargs): return OrderBy(self, descending=True, **kwargs) def reverse_ordering(self): return self def flatten(self): """ Recursively yield this expression and all subexpressions, in depth-first order. """ yield self for expr in self.get_source_expressions(): if expr: yield from expr.flatten() @cached_property def identity(self): constructor_signature = inspect.signature(self.__init__) args, kwargs = self._constructor_args signature = constructor_signature.bind_partial(*args, **kwargs) signature.apply_defaults() arguments = signature.arguments.items() identity = [self.__class__] for arg, value in arguments: if isinstance(value, fields.Field): if value.name and value.model: value = (value.model._meta.label, value.name) else: value = type(value) else: value = make_hashable(value) identity.append((arg, value)) return tuple(identity) def __eq__(self, other): return isinstance(other, BaseExpression) and other.identity == self.identity def __hash__(self): return hash(self.identity) class Expression(BaseExpression, Combinable): """An expression that can be combined with other expressions.""" pass class CombinedExpression(SQLiteNumericMixin, Expression): def __init__(self, lhs, connector, rhs, output_field=None): super().__init__(output_field=output_field) self.connector = connector self.lhs = lhs self.rhs = rhs def __repr__(self): return "<{}: {}>".format(self.__class__.__name__, self) def __str__(self): return "{} {} {}".format(self.lhs, self.connector, self.rhs) def get_source_expressions(self): return [self.lhs, self.rhs] def set_source_expressions(self, exprs): self.lhs, self.rhs = exprs def as_sql(self, compiler, connection): try: lhs_output = self.lhs.output_field except FieldError: lhs_output = None try: rhs_output = self.rhs.output_field except FieldError: rhs_output = None if (not connection.features.has_native_duration_field and ((lhs_output and lhs_output.get_internal_type() == 'DurationField') or (rhs_output and rhs_output.get_internal_type() == 'DurationField'))): return DurationExpression(self.lhs, self.connector, self.rhs).as_sql(compiler, connection) if (lhs_output and rhs_output and self.connector == self.SUB and lhs_output.get_internal_type() in {'DateField', 'DateTimeField', 'TimeField'} and 
lhs_output.get_internal_type() == rhs_output.get_internal_type()): return TemporalSubtraction(self.lhs, self.rhs).as_sql(compiler, connection) expressions = [] expression_params = [] sql, params = compiler.compile(self.lhs) expressions.append(sql) expression_params.extend(params) sql, params = compiler.compile(self.rhs) expressions.append(sql) expression_params.extend(params) # order of precedence expression_wrapper = '(%s)' sql = connection.ops.combine_expression(self.connector, expressions) return expression_wrapper % sql, expression_params def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False): c = self.copy() c.is_summary = summarize c.lhs = c.lhs.resolve_expression(query, allow_joins, reuse, summarize, for_save) c.rhs = c.rhs.resolve_expression(query, allow_joins, reuse, summarize, for_save) return c class DurationExpression(CombinedExpression): def compile(self, side, compiler, connection): if not isinstance(side, DurationValue): try: output = side.output_field except FieldError: pass else: if output.get_internal_type() == 'DurationField': sql, params = compiler.compile(side) return connection.ops.format_for_duration_arithmetic(sql), params return compiler.compile(side) def as_sql(self, compiler, connection): connection.ops.check_expression_support(self) expressions = [] expression_params = [] sql, params = self.compile(self.lhs, compiler, connection) expressions.append(sql) expression_params.extend(params) sql, params = self.compile(self.rhs, compiler, connection) expressions.append(sql) expression_params.extend(params) # order of precedence expression_wrapper = '(%s)' sql = connection.ops.combine_duration_expression(self.connector, expressions) return expression_wrapper % sql, expression_params class TemporalSubtraction(CombinedExpression): output_field = fields.DurationField() def __init__(self, lhs, rhs): super().__init__(lhs, self.SUB, rhs) def as_sql(self, compiler, connection): connection.ops.check_expression_support(self) lhs = compiler.compile(self.lhs, connection) rhs = compiler.compile(self.rhs, connection) return connection.ops.subtract_temporals(self.lhs.output_field.get_internal_type(), lhs, rhs) @deconstructible class F(Combinable): """An object capable of resolving references to existing query objects.""" # Can the expression be used in a WHERE clause? filterable = True def __init__(self, name): """ Arguments: * name: the name of the field this expression references """ self.name = name def __repr__(self): return "{}({})".format(self.__class__.__name__, self.name) def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False, simple_col=False): return query.resolve_ref(self.name, allow_joins, reuse, summarize, simple_col) def asc(self, **kwargs): return OrderBy(self, **kwargs) def desc(self, **kwargs): return OrderBy(self, descending=True, **kwargs) def __eq__(self, other): return self.__class__ == other.__class__ and self.name == other.name def __hash__(self): return hash(self.name) class ResolvedOuterRef(F): """ An object that contains a reference to an outer query. In this case, the reference to the outer query has been resolved because the inner query has been used as a subquery. """ contains_aggregate = False def as_sql(self, *args, **kwargs): raise ValueError( 'This queryset contains a reference to an outer query and may ' 'only be used in a subquery.' 
) def relabeled_clone(self, relabels): return self class OuterRef(F): def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False, simple_col=False): if isinstance(self.name, self.__class__): return self.name return ResolvedOuterRef(self.name) class Func(SQLiteNumericMixin, Expression): """An SQL function call.""" function = None template = '%(function)s(%(expressions)s)' arg_joiner = ', ' arity = None # The number of arguments the function accepts. def __init__(self, *expressions, output_field=None, **extra): if self.arity is not None and len(expressions) != self.arity: raise TypeError( "'%s' takes exactly %s %s (%s given)" % ( self.__class__.__name__, self.arity, "argument" if self.arity == 1 else "arguments", len(expressions), ) ) super().__init__(output_field=output_field) self.source_expressions = self._parse_expressions(*expressions) self.extra = extra def __repr__(self): args = self.arg_joiner.join(str(arg) for arg in self.source_expressions) extra = {**self.extra, **self._get_repr_options()} if extra: extra = ', '.join(str(key) + '=' + str(val) for key, val in sorted(extra.items())) return "{}({}, {})".format(self.__class__.__name__, args, extra) return "{}({})".format(self.__class__.__name__, args) def _get_repr_options(self): """Return a dict of extra __init__() options to include in the repr.""" return {} def get_source_expressions(self): return self.source_expressions def set_source_expressions(self, exprs): self.source_expressions = exprs def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False): c = self.copy() c.is_summary = summarize for pos, arg in enumerate(c.source_expressions): c.source_expressions[pos] = arg.resolve_expression(query, allow_joins, reuse, summarize, for_save) return c def as_sql(self, compiler, connection, function=None, template=None, arg_joiner=None, **extra_context): connection.ops.check_expression_support(self) sql_parts = [] params = [] for arg in self.source_expressions: arg_sql, arg_params = compiler.compile(arg) sql_parts.append(arg_sql) params.extend(arg_params) data = {**self.extra, **extra_context} # Use the first supplied value in this order: the parameter to this # method, a value supplied in __init__()'s **extra (the value in # `data`), or the value defined on the class. if function is not None: data['function'] = function else: data.setdefault('function', self.function) template = template or data.get('template', self.template) arg_joiner = arg_joiner or data.get('arg_joiner', self.arg_joiner) data['expressions'] = data['field'] = arg_joiner.join(sql_parts) return template % data, params def copy(self): copy = super().copy() copy.source_expressions = self.source_expressions[:] copy.extra = self.extra.copy() return copy class Value(Expression): """Represent a wrapped value as a node within an expression.""" def __init__(self, value, output_field=None): """ Arguments: * value: the value this expression represents. The value will be added into the sql parameter list and properly quoted. * output_field: an instance of the model field type that this expression will return, such as IntegerField() or CharField(). 
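
        An illustrative use (``Book`` is a hypothetical model):

        ```
        from django.db.models import CharField, Value
        Book.objects.annotate(source=Value('library', output_field=CharField()))
        # The value is sent as a query parameter via a '%s' placeholder.
        ```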
""" super().__init__(output_field=output_field) self.value = value def __repr__(self): return "{}({})".format(self.__class__.__name__, self.value) def as_sql(self, compiler, connection): connection.ops.check_expression_support(self) val = self.value output_field = self._output_field_or_none if output_field is not None: if self.for_save: val = output_field.get_db_prep_save(val, connection=connection) else: val = output_field.get_db_prep_value(val, connection=connection) if hasattr(output_field, 'get_placeholder'): return output_field.get_placeholder(val, compiler, connection), [val] if val is None: # cx_Oracle does not always convert None to the appropriate # NULL type (like in case expressions using numbers), so we # use a literal SQL NULL return 'NULL', [] return '%s', [val] def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False): c = super().resolve_expression(query, allow_joins, reuse, summarize, for_save) c.for_save = for_save return c def get_group_by_cols(self, alias=None): return [] class DurationValue(Value): def as_sql(self, compiler, connection): connection.ops.check_expression_support(self) if connection.features.has_native_duration_field: return super().as_sql(compiler, connection) return connection.ops.date_interval_sql(self.value), [] class RawSQL(Expression): def __init__(self, sql, params, output_field=None): if output_field is None: output_field = fields.Field() self.sql, self.params = sql, params super().__init__(output_field=output_field) def __repr__(self): return "{}({}, {})".format(self.__class__.__name__, self.sql, self.params) def as_sql(self, compiler, connection): return '(%s)' % self.sql, self.params def get_group_by_cols(self, alias=None): return [self] class Star(Expression): def __repr__(self): return "'*'" def as_sql(self, compiler, connection): return '*', [] class Random(Expression): output_field = fields.FloatField() def __repr__(self): return "Random()" def as_sql(self, compiler, connection): return connection.ops.random_function_sql(), [] class Col(Expression): contains_column_references = True def __init__(self, alias, target, output_field=None): if output_field is None: output_field = target super().__init__(output_field=output_field) self.alias, self.target = alias, target def __repr__(self): return "{}({}, {})".format( self.__class__.__name__, self.alias, self.target) def as_sql(self, compiler, connection): qn = compiler.quote_name_unless_alias return "%s.%s" % (qn(self.alias), qn(self.target.column)), [] def relabeled_clone(self, relabels): return self.__class__(relabels.get(self.alias, self.alias), self.target, self.output_field) def get_group_by_cols(self, alias=None): return [self] def get_db_converters(self, connection): if self.target == self.output_field: return self.output_field.get_db_converters(connection) return (self.output_field.get_db_converters(connection) + self.target.get_db_converters(connection)) class SimpleCol(Expression): """ Represents the SQL of a column name without the table name. This variant of Col doesn't include the table name (or an alias) to avoid a syntax error in check constraints. 
""" contains_column_references = True def __init__(self, target, output_field=None): if output_field is None: output_field = target super().__init__(output_field=output_field) self.target = target def __repr__(self): return '{}({})'.format(self.__class__.__name__, self.target) def as_sql(self, compiler, connection): qn = compiler.quote_name_unless_alias return qn(self.target.column), [] def get_group_by_cols(self, alias=None): return [self] def get_db_converters(self, connection): if self.target == self.output_field: return self.output_field.get_db_converters(connection) return ( self.output_field.get_db_converters(connection) + self.target.get_db_converters(connection) ) class Ref(Expression): """ Reference to column alias of the query. For example, Ref('sum_cost') in qs.annotate(sum_cost=Sum('cost')) query. """ def __init__(self, refs, source): super().__init__() self.refs, self.source = refs, source def __repr__(self): return "{}({}, {})".format(self.__class__.__name__, self.refs, self.source) def get_source_expressions(self): return [self.source] def set_source_expressions(self, exprs): self.source, = exprs def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False): # The sub-expression `source` has already been resolved, as this is # just a reference to the name of `source`. return self def relabeled_clone(self, relabels): return self def as_sql(self, compiler, connection): return connection.ops.quote_name(self.refs), [] def get_group_by_cols(self, alias=None): return [self] class ExpressionList(Func): """ An expression containing multiple expressions. Can be used to provide a list of expressions as an argument to another expression, like an ordering clause. """ template = '%(expressions)s' def __init__(self, *expressions, **extra): if not expressions: raise ValueError('%s requires at least one expression.' % self.__class__.__name__) super().__init__(*expressions, **extra) def __str__(self): return self.arg_joiner.join(str(arg) for arg in self.source_expressions) class ExpressionWrapper(Expression): """ An expression that can wrap another expression so that it can provide extra context to the inner expression, such as the output_field. 
""" def __init__(self, expression, output_field): super().__init__(output_field=output_field) self.expression = expression def set_source_expressions(self, exprs): self.expression = exprs[0] def get_source_expressions(self): return [self.expression] def as_sql(self, compiler, connection): return self.expression.as_sql(compiler, connection) def __repr__(self): return "{}({})".format(self.__class__.__name__, self.expression) class When(Expression): template = 'WHEN %(condition)s THEN %(result)s' def __init__(self, condition=None, then=None, **lookups): if lookups and condition is None: condition, lookups = Q(**lookups), None if condition is None or not getattr(condition, 'conditional', False) or lookups: raise TypeError("__init__() takes either a Q object or lookups as keyword arguments") if isinstance(condition, Q) and not condition: raise ValueError("An empty Q() can't be used as a When() condition.") super().__init__(output_field=None) self.condition = condition self.result = self._parse_expressions(then)[0] def __str__(self): return "WHEN %r THEN %r" % (self.condition, self.result) def __repr__(self): return "<%s: %s>" % (self.__class__.__name__, self) def get_source_expressions(self): return [self.condition, self.result] def set_source_expressions(self, exprs): self.condition, self.result = exprs def get_source_fields(self): # We're only interested in the fields of the result expressions. return [self.result._output_field_or_none] def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False): c = self.copy() c.is_summary = summarize if hasattr(c.condition, 'resolve_expression'): c.condition = c.condition.resolve_expression(query, allow_joins, reuse, summarize, False) c.result = c.result.resolve_expression(query, allow_joins, reuse, summarize, for_save) return c def as_sql(self, compiler, connection, template=None, **extra_context): connection.ops.check_expression_support(self) template_params = extra_context sql_params = [] condition_sql, condition_params = compiler.compile(self.condition) template_params['condition'] = condition_sql sql_params.extend(condition_params) result_sql, result_params = compiler.compile(self.result) template_params['result'] = result_sql sql_params.extend(result_params) template = template or self.template return template % template_params, sql_params def get_group_by_cols(self, alias=None): # This is not a complete expression and cannot be used in GROUP BY. 
cols = [] for source in self.get_source_expressions(): cols.extend(source.get_group_by_cols()) return cols class Case(Expression): """ An SQL searched CASE expression: CASE WHEN n > 0 THEN 'positive' WHEN n < 0 THEN 'negative' ELSE 'zero' END """ template = 'CASE %(cases)s ELSE %(default)s END' case_joiner = ' ' def __init__(self, *cases, default=None, output_field=None, **extra): if not all(isinstance(case, When) for case in cases): raise TypeError("Positional arguments must all be When objects.") super().__init__(output_field) self.cases = list(cases) self.default = self._parse_expressions(default)[0] self.extra = extra def __str__(self): return "CASE %s, ELSE %r" % (', '.join(str(c) for c in self.cases), self.default) def __repr__(self): return "<%s: %s>" % (self.__class__.__name__, self) def get_source_expressions(self): return self.cases + [self.default] def set_source_expressions(self, exprs): *self.cases, self.default = exprs def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False): c = self.copy() c.is_summary = summarize for pos, case in enumerate(c.cases): c.cases[pos] = case.resolve_expression(query, allow_joins, reuse, summarize, for_save) c.default = c.default.resolve_expression(query, allow_joins, reuse, summarize, for_save) return c def copy(self): c = super().copy() c.cases = c.cases[:] return c def as_sql(self, compiler, connection, template=None, case_joiner=None, **extra_context): connection.ops.check_expression_support(self) if not self.cases: return compiler.compile(self.default) template_params = {**self.extra, **extra_context} case_parts = [] sql_params = [] for case in self.cases: try: case_sql, case_params = compiler.compile(case) except EmptyResultSet: continue case_parts.append(case_sql) sql_params.extend(case_params) default_sql, default_params = compiler.compile(self.default) if not case_parts: return default_sql, default_params case_joiner = case_joiner or self.case_joiner template_params['cases'] = case_joiner.join(case_parts) template_params['default'] = default_sql sql_params.extend(default_params) template = template or template_params.get('template', self.template) sql = template % template_params if self._output_field_or_none is not None: sql = connection.ops.unification_cast_sql(self.output_field) % sql return sql, sql_params class Subquery(Expression): """ An explicit subquery. It may contain OuterRef() references to the outer query which will be resolved when it is applied to that query. 
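
    An illustrative sketch (``Post`` and ``Comment`` are hypothetical models):

    ```
    newest = Comment.objects.filter(post=OuterRef('pk')).order_by('-created_at')
    Post.objects.annotate(newest_author=Subquery(newest.values('author')[:1]))
    ```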
""" template = '(%(subquery)s)' contains_aggregate = False def __init__(self, queryset, output_field=None, **extra): self.query = queryset.query self.extra = extra super().__init__(output_field) def get_source_expressions(self): return [self.query] def set_source_expressions(self, exprs): self.query = exprs[0] def _resolve_output_field(self): return self.query.output_field def copy(self): clone = super().copy() clone.query = clone.query.clone() return clone @property def external_aliases(self): return self.query.external_aliases def as_sql(self, compiler, connection, template=None, **extra_context): connection.ops.check_expression_support(self) template_params = {**self.extra, **extra_context} subquery_sql, sql_params = self.query.as_sql(compiler, connection) template_params['subquery'] = subquery_sql[1:-1] template = template or template_params.get('template', self.template) sql = template % template_params return sql, sql_params def get_group_by_cols(self, alias=None): if alias: return [Ref(alias, self)] return [] class Exists(Subquery): template = 'EXISTS(%(subquery)s)' output_field = fields.BooleanField() def __init__(self, queryset, negated=False, **kwargs): # As a performance optimization, remove ordering since EXISTS doesn't # care about it, just whether or not a row matches. queryset = queryset.order_by() self.negated = negated super().__init__(queryset, **kwargs) def __invert__(self): clone = self.copy() clone.negated = not self.negated return clone def as_sql(self, compiler, connection, template=None, **extra_context): sql, params = super().as_sql(compiler, connection, template, **extra_context) if self.negated: sql = 'NOT {}'.format(sql) return sql, params def as_oracle(self, compiler, connection, template=None, **extra_context): # Oracle doesn't allow EXISTS() in the SELECT list, so wrap it with a # CASE WHEN expression. Change the template since the When expression # requires a left hand side (column) to compare against. 
sql, params = self.as_sql(compiler, connection, template, **extra_context) sql = 'CASE WHEN {} THEN 1 ELSE 0 END'.format(sql) return sql, params class OrderBy(BaseExpression): template = '%(expression)s %(ordering)s' def __init__(self, expression, descending=False, nulls_first=False, nulls_last=False): if nulls_first and nulls_last: raise ValueError('nulls_first and nulls_last are mutually exclusive') self.nulls_first = nulls_first self.nulls_last = nulls_last self.descending = descending if not hasattr(expression, 'resolve_expression'): raise ValueError('expression must be an expression type') self.expression = expression def __repr__(self): return "{}({}, descending={})".format( self.__class__.__name__, self.expression, self.descending) def set_source_expressions(self, exprs): self.expression = exprs[0] def get_source_expressions(self): return [self.expression] def as_sql(self, compiler, connection, template=None, **extra_context): if not template: if self.nulls_last: template = '%s NULLS LAST' % self.template elif self.nulls_first: template = '%s NULLS FIRST' % self.template connection.ops.check_expression_support(self) expression_sql, params = compiler.compile(self.expression) placeholders = { 'expression': expression_sql, 'ordering': 'DESC' if self.descending else 'ASC', **extra_context, } template = template or self.template params *= template.count('%(expression)s') return (template % placeholders).rstrip(), params def as_sqlite(self, compiler, connection): template = None if self.nulls_last: template = '%(expression)s IS NULL, %(expression)s %(ordering)s' elif self.nulls_first: template = '%(expression)s IS NOT NULL, %(expression)s %(ordering)s' return self.as_sql(compiler, connection, template=template) def as_mysql(self, compiler, connection): template = None if self.nulls_last: template = 'IF(ISNULL(%(expression)s),1,0), %(expression)s %(ordering)s ' elif self.nulls_first: template = 'IF(ISNULL(%(expression)s),0,1), %(expression)s %(ordering)s ' return self.as_sql(compiler, connection, template=template) def get_group_by_cols(self, alias=None): cols = [] for source in self.get_source_expressions(): cols.extend(source.get_group_by_cols()) return cols def reverse_ordering(self): self.descending = not self.descending if self.nulls_first or self.nulls_last: self.nulls_first = not self.nulls_first self.nulls_last = not self.nulls_last return self def asc(self): self.descending = False def desc(self): self.descending = True class Window(Expression): template = '%(expression)s OVER (%(window)s)' # Although the main expression may either be an aggregate or an # expression with an aggregate function, the GROUP BY that will # be introduced in the query as a result is not desired. contains_aggregate = False contains_over_clause = True filterable = False def __init__(self, expression, partition_by=None, order_by=None, frame=None, output_field=None): self.partition_by = partition_by self.order_by = order_by self.frame = frame if not getattr(expression, 'window_compatible', False): raise ValueError( "Expression '%s' isn't compatible with OVER clauses." 
% expression.__class__.__name__ ) if self.partition_by is not None: if not isinstance(self.partition_by, (tuple, list)): self.partition_by = (self.partition_by,) self.partition_by = ExpressionList(*self.partition_by) if self.order_by is not None: if isinstance(self.order_by, (list, tuple)): self.order_by = ExpressionList(*self.order_by) elif not isinstance(self.order_by, BaseExpression): raise ValueError( 'order_by must be either an Expression or a sequence of ' 'expressions.' ) super().__init__(output_field=output_field) self.source_expression = self._parse_expressions(expression)[0] def _resolve_output_field(self): return self.source_expression.output_field def get_source_expressions(self): return [self.source_expression, self.partition_by, self.order_by, self.frame] def set_source_expressions(self, exprs): self.source_expression, self.partition_by, self.order_by, self.frame = exprs def as_sql(self, compiler, connection, template=None): connection.ops.check_expression_support(self) if not connection.features.supports_over_clause: raise NotSupportedError('This backend does not support window expressions.') expr_sql, params = compiler.compile(self.source_expression) window_sql, window_params = [], [] if self.partition_by is not None: sql_expr, sql_params = self.partition_by.as_sql( compiler=compiler, connection=connection, template='PARTITION BY %(expressions)s', ) window_sql.extend(sql_expr) window_params.extend(sql_params) if self.order_by is not None: window_sql.append(' ORDER BY ') order_sql, order_params = compiler.compile(self.order_by) window_sql.extend(order_sql) window_params.extend(order_params) if self.frame: frame_sql, frame_params = compiler.compile(self.frame) window_sql.append(' ' + frame_sql) window_params.extend(frame_params) params.extend(window_params) template = template or self.template return template % { 'expression': expr_sql, 'window': ''.join(window_sql).strip() }, params def __str__(self): return '{} OVER ({}{}{})'.format( str(self.source_expression), 'PARTITION BY ' + str(self.partition_by) if self.partition_by else '', 'ORDER BY ' + str(self.order_by) if self.order_by else '', str(self.frame or ''), ) def __repr__(self): return '<%s: %s>' % (self.__class__.__name__, self) def get_group_by_cols(self, alias=None): return [] class WindowFrame(Expression): """ Model the frame clause in window expressions. There are two types of frame clauses which are subclasses, however, all processing and validation (by no means intended to be complete) is done here. Thus, providing an end for a frame is optional (the default is UNBOUNDED FOLLOWING, which is the last row in the frame). 
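
    For example, RowRange(start=-1, end=1) renders as
    'ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING'; the exact keywords come from
    the backend's operations class.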
""" template = '%(frame_type)s BETWEEN %(start)s AND %(end)s' def __init__(self, start=None, end=None): self.start = Value(start) self.end = Value(end) def set_source_expressions(self, exprs): self.start, self.end = exprs def get_source_expressions(self): return [self.start, self.end] def as_sql(self, compiler, connection): connection.ops.check_expression_support(self) start, end = self.window_frame_start_end(connection, self.start.value, self.end.value) return self.template % { 'frame_type': self.frame_type, 'start': start, 'end': end, }, [] def __repr__(self): return '<%s: %s>' % (self.__class__.__name__, self) def get_group_by_cols(self, alias=None): return [] def __str__(self): if self.start.value is not None and self.start.value < 0: start = '%d %s' % (abs(self.start.value), connection.ops.PRECEDING) elif self.start.value is not None and self.start.value == 0: start = connection.ops.CURRENT_ROW else: start = connection.ops.UNBOUNDED_PRECEDING if self.end.value is not None and self.end.value > 0: end = '%d %s' % (self.end.value, connection.ops.FOLLOWING) elif self.end.value is not None and self.end.value == 0: end = connection.ops.CURRENT_ROW else: end = connection.ops.UNBOUNDED_FOLLOWING return self.template % { 'frame_type': self.frame_type, 'start': start, 'end': end, } def window_frame_start_end(self, connection, start, end): raise NotImplementedError('Subclasses must implement window_frame_start_end().') class RowRange(WindowFrame): frame_type = 'ROWS' def window_frame_start_end(self, connection, start, end): return connection.ops.window_frame_rows_start_end(start, end) class ValueRange(WindowFrame): frame_type = 'RANGE' def window_frame_start_end(self, connection, start, end): return connection.ops.window_frame_range_start_end(start, end)
0d7a6e4dea00a87288cbddfafa7f760427a0cfb89d41e74692dc70531327f2df
import collections import re import warnings from itertools import chain from django.core.exceptions import EmptyResultSet, FieldError from django.db.models.constants import LOOKUP_SEP from django.db.models.expressions import OrderBy, Random, RawSQL, Ref, Value from django.db.models.functions import Cast from django.db.models.query_utils import QueryWrapper, select_related_descend from django.db.models.sql.constants import ( CURSOR, GET_ITERATOR_CHUNK_SIZE, MULTI, NO_RESULTS, ORDER_DIR, SINGLE, ) from django.db.models.sql.query import Query, get_order_dir from django.db.transaction import TransactionManagementError from django.db.utils import DatabaseError, NotSupportedError from django.utils.deprecation import RemovedInDjango31Warning from django.utils.hashable import make_hashable FORCE = object() class SQLCompiler: def __init__(self, query, connection, using): self.query = query self.connection = connection self.using = using self.quote_cache = {'*': '*'} # The select, klass_info, and annotations are needed by QuerySet.iterator() # these are set as a side-effect of executing the query. Note that we calculate # separately a list of extra select columns needed for grammatical correctness # of the query, but these columns are not included in self.select. self.select = None self.annotation_col_map = None self.klass_info = None # Multiline ordering SQL clause may appear from RawSQL. self.ordering_parts = re.compile(r'^(.*)\s(ASC|DESC)(.*)', re.MULTILINE | re.DOTALL) self._meta_ordering = None def setup_query(self): if all(self.query.alias_refcount[a] == 0 for a in self.query.alias_map): self.query.get_initial_alias() self.select, self.klass_info, self.annotation_col_map = self.get_select() self.col_count = len(self.select) def pre_sql_setup(self): """ Do any necessary class setup immediately prior to producing SQL. This is for things that can't necessarily be done in __init__ because we might not have all the pieces in place at that time. """ self.setup_query() order_by = self.get_order_by() self.where, self.having = self.query.where.split_having() extra_select = self.get_extra_select(order_by, self.select) self.has_extra_select = bool(extra_select) group_by = self.get_group_by(self.select + extra_select, order_by) return extra_select, order_by, group_by def get_group_by(self, select, order_by): """ Return a list of 2-tuples of form (sql, params). The logic of what exactly the GROUP BY clause contains is hard to describe in other words than "if it passes the test suite, then it is correct". """ # Some examples: # SomeModel.objects.annotate(Count('somecol')) # GROUP BY: all fields of the model # # SomeModel.objects.values('name').annotate(Count('somecol')) # GROUP BY: name # # SomeModel.objects.annotate(Count('somecol')).values('name') # GROUP BY: all cols of the model # # SomeModel.objects.values('name', 'pk').annotate(Count('somecol')).values('pk') # GROUP BY: name, pk # # SomeModel.objects.values('name').annotate(Count('somecol')).values('pk') # GROUP BY: name, pk # # In fact, the self.query.group_by is the minimal set to GROUP BY. It # can't be ever restricted to a smaller set, but additional columns in # HAVING, ORDER BY, and SELECT clauses are added to it. 
Unfortunately # the end result is that it is impossible to force the query to have # a chosen GROUP BY clause - you can almost do this by using the form: # .values(*wanted_cols).annotate(AnAggregate()) # but any later annotations, extra selects, values calls that # refer some column outside of the wanted_cols, order_by, or even # filter calls can alter the GROUP BY clause. # The query.group_by is either None (no GROUP BY at all), True # (group by select fields), or a list of expressions to be added # to the group by. if self.query.group_by is None: return [] expressions = [] if self.query.group_by is not True: # If the group by is set to a list (by .values() call most likely), # then we need to add everything in it to the GROUP BY clause. # Backwards compatibility hack for setting query.group_by. Remove # when we have public API way of forcing the GROUP BY clause. # Converts string references to expressions. for expr in self.query.group_by: if not hasattr(expr, 'as_sql'): expressions.append(self.query.resolve_ref(expr)) else: expressions.append(expr) # Note that even if the group_by is set, it is only the minimal # set to group by. So, we need to add cols in select, order_by, and # having into the select in any case. for expr, _, _ in select: cols = expr.get_group_by_cols() for col in cols: expressions.append(col) for expr, (sql, params, is_ref) in order_by: # Skip References to the select clause, as all expressions in the # select clause are already part of the group by. if not expr.contains_aggregate and not is_ref: expressions.extend(expr.get_source_expressions()) having_group_by = self.having.get_group_by_cols() if self.having else () for expr in having_group_by: expressions.append(expr) result = [] seen = set() expressions = self.collapse_group_by(expressions, having_group_by) for expr in expressions: sql, params = self.compile(expr) params_hash = make_hashable(params) if (sql, params_hash) not in seen: result.append((sql, params)) seen.add((sql, params_hash)) return result def collapse_group_by(self, expressions, having): # If the DB can group by primary key, then group by the primary key of # query's main model. Note that for PostgreSQL the GROUP BY clause must # include the primary key of every table, but for MySQL it is enough to # have the main table's primary key. if self.connection.features.allows_group_by_pk: # Determine if the main model's primary key is in the query. pk = None for expr in expressions: # Is this a reference to query's base table primary key? If the # expression isn't a Col-like, then skip the expression. if (getattr(expr, 'target', None) == self.query.model._meta.pk and getattr(expr, 'alias', None) == self.query.base_table): pk = expr break # If the main model's primary key is in the query, group by that # field, HAVING expressions, and expressions associated with tables # that don't have a primary key included in the grouped columns. if pk: pk_aliases = { expr.alias for expr in expressions if hasattr(expr, 'target') and expr.target.primary_key } expressions = [pk] + [ expr for expr in expressions if expr in having or ( getattr(expr, 'alias', None) is not None and expr.alias not in pk_aliases ) ] elif self.connection.features.allows_group_by_selected_pks: # Filter out all expressions associated with a table's primary key # present in the grouped columns. This is done by identifying all # tables that have their primary key included in the grouped # columns and removing non-primary key columns referring to them. 
            # Unmanaged models are excluded because they could be representing
            # database views on which the optimization might not be allowed.
            pks = {
                expr for expr in expressions
                if hasattr(expr, 'target') and expr.target.primary_key and expr.target.model._meta.managed
            }
            aliases = {expr.alias for expr in pks}
            expressions = [
                expr for expr in expressions if expr in pks or getattr(expr, 'alias', None) not in aliases
            ]
        return expressions

    def get_select(self):
        """
        Return three values:
        - a list of 3-tuples of (expression, (sql, params), alias)
        - a klass_info structure,
        - a dictionary of annotations

        The (sql, params) is what the expression will produce, and alias is the
        "AS alias" for the column (possibly None).

        The klass_info structure contains the following information:
        - The base model of the query.
        - Which columns for that model are present in the query (by
          position of the select clause).
        - related_klass_infos: [f, klass_info] to descend into

        The annotations is a dictionary mapping attname to column position.
        """
        select = []
        klass_info = None
        annotations = {}
        select_idx = 0
        for alias, (sql, params) in self.query.extra_select.items():
            annotations[alias] = select_idx
            select.append((RawSQL(sql, params), alias))
            select_idx += 1
        assert not (self.query.select and self.query.default_cols)
        if self.query.default_cols:
            cols = self.get_default_columns()
        else:
            # self.query.select is a special case. These columns never go to
            # any model.
            cols = self.query.select
        if cols:
            select_list = []
            for col in cols:
                select_list.append(select_idx)
                select.append((col, None))
                select_idx += 1
            klass_info = {
                'model': self.query.model,
                'select_fields': select_list,
            }
        for alias, annotation in self.query.annotation_select.items():
            annotations[alias] = select_idx
            select.append((annotation, alias))
            select_idx += 1

        if self.query.select_related:
            related_klass_infos = self.get_related_selections(select)
            klass_info['related_klass_infos'] = related_klass_infos

            def get_select_from_parent(klass_info):
                for ki in klass_info['related_klass_infos']:
                    if ki['from_parent']:
                        ki['select_fields'] = (klass_info['select_fields'] +
                                               ki['select_fields'])
                    get_select_from_parent(ki)
            get_select_from_parent(klass_info)

        ret = []
        for col, alias in select:
            try:
                sql, params = self.compile(col, select_format=True)
            except EmptyResultSet:
                # Select a predicate that's always False.
                sql, params = '0', ()
            ret.append((col, (sql, params), alias))
        return ret, klass_info, annotations

    def get_order_by(self):
        """
        Return a list of 2-tuples of form (expr, (sql, params, is_ref)) for
        the ORDER BY clause.

        The order_by clause can alter the select clause (for example it
        can add aliases to clauses that do not yet have one, or it can
        add totally new select clauses).
        """
        if self.query.extra_order_by:
            ordering = self.query.extra_order_by
        elif not self.query.default_ordering:
            ordering = self.query.order_by
        elif self.query.order_by:
            ordering = self.query.order_by
        elif self.query.get_meta().ordering:
            ordering = self.query.get_meta().ordering
            self._meta_ordering = ordering
        else:
            ordering = []
        if self.query.standard_ordering:
            asc, desc = ORDER_DIR['ASC']
        else:
            asc, desc = ORDER_DIR['DESC']

        order_by = []
        for field in ordering:
            if hasattr(field, 'resolve_expression'):
                if isinstance(field, Value):
                    # output_field must be resolved for constants.
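                    # Wrapping the constant in Cast() gives the ORDER BY entry
                    # an explicit database type instead of a bare placeholder.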
field = Cast(field, field.output_field) if not isinstance(field, OrderBy): field = field.asc() if not self.query.standard_ordering: field = field.copy() field.reverse_ordering() order_by.append((field, False)) continue if field == '?': # random order_by.append((OrderBy(Random()), False)) continue col, order = get_order_dir(field, asc) descending = order == 'DESC' if col in self.query.annotation_select: # Reference to expression in SELECT clause order_by.append(( OrderBy(Ref(col, self.query.annotation_select[col]), descending=descending), True)) continue if col in self.query.annotations: # References to an expression which is masked out of the SELECT # clause. expr = self.query.annotations[col] if isinstance(expr, Value): # output_field must be resolved for constants. expr = Cast(expr, expr.output_field) order_by.append((OrderBy(expr, descending=descending), False)) continue if '.' in field: # This came in through an extra(order_by=...) addition. Pass it # on verbatim. table, col = col.split('.', 1) order_by.append(( OrderBy( RawSQL('%s.%s' % (self.quote_name_unless_alias(table), col), []), descending=descending ), False)) continue if not self.query.extra or col not in self.query.extra: # 'col' is of the form 'field' or 'field1__field2' or # '-field1__field2__field', etc. order_by.extend(self.find_ordering_name( field, self.query.get_meta(), default_order=asc)) else: if col not in self.query.extra_select: order_by.append(( OrderBy(RawSQL(*self.query.extra[col]), descending=descending), False)) else: order_by.append(( OrderBy(Ref(col, RawSQL(*self.query.extra[col])), descending=descending), True)) result = [] seen = set() for expr, is_ref in order_by: resolved = expr.resolve_expression(self.query, allow_joins=True, reuse=None) if self.query.combinator: src = resolved.get_source_expressions()[0] # Relabel order by columns to raw numbers if this is a combined # query; necessary since the columns can't be referenced by the # fully qualified name and the simple column names may collide. for idx, (sel_expr, _, col_alias) in enumerate(self.select): if is_ref and col_alias == src.refs: src = src.source elif col_alias: continue if src == sel_expr: resolved.set_source_expressions([RawSQL('%d' % (idx + 1), ())]) break else: if col_alias: raise DatabaseError('ORDER BY term does not match any column in the result set.') # Add column used in ORDER BY clause without an alias to # the selected columns. self.query.add_select_col(src) resolved.set_source_expressions([RawSQL('%d' % len(self.query.select), ())]) sql, params = self.compile(resolved) # Don't add the same column twice, but the order direction is # not taken into account so we strip it. When this entire method # is refactored into expressions, then we can check each part as we # generate it. 
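            # For example, 'name ASC' and 'name DESC' both reduce to 'name'
            # here, so only the first occurrence is kept.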
without_ordering = self.ordering_parts.search(sql).group(1) params_hash = make_hashable(params) if (without_ordering, params_hash) in seen: continue seen.add((without_ordering, params_hash)) result.append((resolved, (sql, params, is_ref))) return result def get_extra_select(self, order_by, select): extra_select = [] if self.query.distinct and not self.query.distinct_fields: select_sql = [t[1] for t in select] for expr, (sql, params, is_ref) in order_by: without_ordering = self.ordering_parts.search(sql).group(1) if not is_ref and (without_ordering, params) not in select_sql: extra_select.append((expr, (without_ordering, params), None)) return extra_select def quote_name_unless_alias(self, name): """ A wrapper around connection.ops.quote_name that doesn't quote aliases for table names. This avoids problems with some SQL dialects that treat quoted strings specially (e.g. PostgreSQL). """ if name in self.quote_cache: return self.quote_cache[name] if ((name in self.query.alias_map and name not in self.query.table_map) or name in self.query.extra_select or ( name in self.query.external_aliases and name not in self.query.table_map)): self.quote_cache[name] = name return name r = self.connection.ops.quote_name(name) self.quote_cache[name] = r return r def compile(self, node, select_format=False): vendor_impl = getattr(node, 'as_' + self.connection.vendor, None) if vendor_impl: sql, params = vendor_impl(self, self.connection) else: sql, params = node.as_sql(self, self.connection) if select_format is FORCE or (select_format and not self.query.subquery): return node.output_field.select_format(self, sql, params) return sql, params def get_combinator_sql(self, combinator, all): features = self.connection.features compilers = [ query.get_compiler(self.using, self.connection) for query in self.query.combined_queries if not query.is_empty() ] if not features.supports_slicing_ordering_in_compound: for query, compiler in zip(self.query.combined_queries, compilers): if query.low_mark or query.high_mark: raise DatabaseError('LIMIT/OFFSET not allowed in subqueries of compound statements.') if compiler.get_order_by(): raise DatabaseError('ORDER BY not allowed in subqueries of compound statements.') parts = () for compiler in compilers: try: # If the columns list is limited, then all combined queries # must have the same columns list. Set the selects defined on # the query on all combined queries, if not already set. if not compiler.query.values_select and self.query.values_select: compiler.query = compiler.query.clone() compiler.query.set_values(( *self.query.extra_select, *self.query.values_select, *self.query.annotation_select, )) part_sql, part_args = compiler.as_sql() if compiler.query.combinator: # Wrap in a subquery if wrapping in parentheses isn't # supported. if not features.supports_parentheses_in_compound: part_sql = 'SELECT * FROM ({})'.format(part_sql) # Add parentheses when combining with compound query if not # already added for all compound queries. elif not features.supports_slicing_ordering_in_compound: part_sql = '({})'.format(part_sql) parts += ((part_sql, part_args),) except EmptyResultSet: # Omit the empty queryset with UNION and with DIFFERENCE if the # first queryset is nonempty. 
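                # An empty operand empties the whole result for 'intersection',
                # as does an empty first operand for 'difference', so the
                # EmptyResultSet is re-raised in those cases.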
if combinator == 'union' or (combinator == 'difference' and parts): continue raise if not parts: raise EmptyResultSet combinator_sql = self.connection.ops.set_operators[combinator] if all and combinator == 'union': combinator_sql += ' ALL' braces = '({})' if features.supports_slicing_ordering_in_compound else '{}' sql_parts, args_parts = zip(*((braces.format(sql), args) for sql, args in parts)) result = [' {} '.format(combinator_sql).join(sql_parts)] params = [] for part in args_parts: params.extend(part) return result, params def as_sql(self, with_limits=True, with_col_aliases=False): """ Create the SQL for this query. Return the SQL string and list of parameters. If 'with_limits' is False, any limit/offset information is not included in the query. """ refcounts_before = self.query.alias_refcount.copy() try: extra_select, order_by, group_by = self.pre_sql_setup() for_update_part = None # Is a LIMIT/OFFSET clause needed? with_limit_offset = with_limits and (self.query.high_mark is not None or self.query.low_mark) combinator = self.query.combinator features = self.connection.features if combinator: if not getattr(features, 'supports_select_{}'.format(combinator)): raise NotSupportedError('{} is not supported on this database backend.'.format(combinator)) result, params = self.get_combinator_sql(combinator, self.query.combinator_all) else: distinct_fields, distinct_params = self.get_distinct() # This must come after 'select', 'ordering', and 'distinct' # (see docstring of get_from_clause() for details). from_, f_params = self.get_from_clause() where, w_params = self.compile(self.where) if self.where is not None else ("", []) having, h_params = self.compile(self.having) if self.having is not None else ("", []) result = ['SELECT'] params = [] if self.query.distinct: distinct_result, distinct_params = self.connection.ops.distinct_sql( distinct_fields, distinct_params, ) result += distinct_result params += distinct_params out_cols = [] col_idx = 1 for _, (s_sql, s_params), alias in self.select + extra_select: if alias: s_sql = '%s AS %s' % (s_sql, self.connection.ops.quote_name(alias)) elif with_col_aliases: s_sql = '%s AS %s' % (s_sql, 'Col%d' % col_idx) col_idx += 1 params.extend(s_params) out_cols.append(s_sql) result += [', '.join(out_cols), 'FROM', *from_] params.extend(f_params) if self.query.select_for_update and self.connection.features.has_select_for_update: if self.connection.get_autocommit(): raise TransactionManagementError('select_for_update cannot be used outside of a transaction.') if with_limit_offset and not self.connection.features.supports_select_for_update_with_limit: raise NotSupportedError( 'LIMIT/OFFSET is not supported with ' 'select_for_update on this database backend.' ) nowait = self.query.select_for_update_nowait skip_locked = self.query.select_for_update_skip_locked of = self.query.select_for_update_of # If it's a NOWAIT/SKIP LOCKED/OF query but the backend # doesn't support it, raise NotSupportedError to prevent a # possible deadlock. 
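            # When supported, these render as e.g. `FOR UPDATE NOWAIT`,
            # `FOR UPDATE SKIP LOCKED`, or `FOR UPDATE OF "table"`.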
if nowait and not self.connection.features.has_select_for_update_nowait: raise NotSupportedError('NOWAIT is not supported on this database backend.') elif skip_locked and not self.connection.features.has_select_for_update_skip_locked: raise NotSupportedError('SKIP LOCKED is not supported on this database backend.') elif of and not self.connection.features.has_select_for_update_of: raise NotSupportedError('FOR UPDATE OF is not supported on this database backend.') for_update_part = self.connection.ops.for_update_sql( nowait=nowait, skip_locked=skip_locked, of=self.get_select_for_update_of_arguments(), ) if for_update_part and self.connection.features.for_update_after_from: result.append(for_update_part) if where: result.append('WHERE %s' % where) params.extend(w_params) grouping = [] for g_sql, g_params in group_by: grouping.append(g_sql) params.extend(g_params) if grouping: if distinct_fields: raise NotImplementedError('annotate() + distinct(fields) is not implemented.') order_by = order_by or self.connection.ops.force_no_ordering() result.append('GROUP BY %s' % ', '.join(grouping)) if self._meta_ordering: # When the deprecation ends, replace with: # order_by = None warnings.warn( "%s QuerySet won't use Meta.ordering in Django 3.1. " "Add .order_by(%s) to retain the current query." % ( self.query.model.__name__, ', '.join(repr(f) for f in self._meta_ordering), ), RemovedInDjango31Warning, stacklevel=4, ) if having: result.append('HAVING %s' % having) params.extend(h_params) if self.query.explain_query: result.insert(0, self.connection.ops.explain_query_prefix( self.query.explain_format, **self.query.explain_options )) if order_by: ordering = [] for _, (o_sql, o_params, _) in order_by: ordering.append(o_sql) params.extend(o_params) result.append('ORDER BY %s' % ', '.join(ordering)) if with_limit_offset: result.append(self.connection.ops.limit_offset_sql(self.query.low_mark, self.query.high_mark)) if for_update_part and not self.connection.features.for_update_after_from: result.append(for_update_part) if self.query.subquery and extra_select: # If the query is used as a subquery, the extra selects would # result in more columns than the left-hand side expression is # expecting. This can happen when a subquery uses a combination # of order_by() and distinct(), forcing the ordering expressions # to be selected as well. Wrap the query in another subquery # to exclude extraneous selects. sub_selects = [] sub_params = [] for index, (select, _, alias) in enumerate(self.select, start=1): if not alias and with_col_aliases: alias = 'col%d' % index if alias: sub_selects.append("%s.%s" % ( self.connection.ops.quote_name('subquery'), self.connection.ops.quote_name(alias), )) else: select_clone = select.relabeled_clone({select.alias: 'subquery'}) subselect, subparams = select_clone.as_sql(self, self.connection) sub_selects.append(subselect) sub_params.extend(subparams) return 'SELECT %s FROM (%s) subquery' % ( ', '.join(sub_selects), ' '.join(result), ), tuple(sub_params + params) return ' '.join(result), tuple(params) finally: # Finally do cleanup - get rid of the joins we created above. self.query.reset_refcounts(refcounts_before) def get_default_columns(self, start_alias=None, opts=None, from_parent=None): """ Compute the default columns for selecting every field in the base model. Will sometimes be called to pull in related models (e.g. via select_related), in which case "opts" and "start_alias" will be given to provide a starting point for the traversal. 
        Return a list of column expressions (Col instances) for the model's
        concrete fields, each bound to the alias of the table it should be
        selected from.
        """
        result = []
        if opts is None:
            opts = self.query.get_meta()
        only_load = self.deferred_to_columns()
        start_alias = start_alias or self.query.get_initial_alias()
        # The 'seen_models' is used to optimize checking the needed parent
        # alias for a given field. This also includes None -> start_alias to
        # be used by local fields.
        seen_models = {None: start_alias}

        for field in opts.concrete_fields:
            model = field.model._meta.concrete_model
            # A proxy model will have a different model and concrete_model. We
            # will assign None if the field belongs to this model.
            if model == opts.model:
                model = None
            if from_parent and model is not None and issubclass(
                    from_parent._meta.concrete_model, model._meta.concrete_model):
                # Avoid loading data for already loaded parents.
                # We end up here in the case select_related() resolution
                # proceeds from parent model to child model. In that case the
                # parent model data is already present in the SELECT clause,
                # and we want to avoid reloading the same data again.
                continue
            if field.model in only_load and field.attname not in only_load[field.model]:
                continue
            alias = self.query.join_parent_model(opts, model, start_alias, seen_models)
            column = field.get_col(alias)
            result.append(column)
        return result

    def get_distinct(self):
        """
        Return a quoted list of fields to use in DISTINCT ON part of the query.

        This method can alter the tables in the query, and thus it must be
        called before get_from_clause().
        """
        result = []
        params = []
        opts = self.query.get_meta()

        for name in self.query.distinct_fields:
            parts = name.split(LOOKUP_SEP)
            _, targets, alias, joins, path, _, transform_function = self._setup_joins(parts, opts, None)
            targets, alias, _ = self.query.trim_joins(targets, joins, path)
            for target in targets:
                if name in self.query.annotation_select:
                    result.append(name)
                else:
                    r, p = self.compile(transform_function(target, alias))
                    result.append(r)
                    params.append(p)
        return result, params

    def find_ordering_name(self, name, opts, alias=None, default_order='ASC',
                           already_seen=None):
        """
        Return the table alias (the name might be ambiguous, the alias will
        not be) and column name for ordering by the given 'name' parameter.
        The 'name' is of the form 'field1__field2__...__fieldN'.
        """
        name, order = get_order_dir(name, default_order)
        descending = order == 'DESC'
        pieces = name.split(LOOKUP_SEP)
        field, targets, alias, joins, path, opts, transform_function = self._setup_joins(pieces, opts, alias)

        # If we get to this point and the field is a relation to another model,
        # append the default ordering for that model unless the attribute name
        # of the field is specified.
        if field.is_relation and opts.ordering and getattr(field, 'attname', None) != name:
            # Firstly, avoid infinite loops.
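            # A model whose Meta.ordering follows a relation chain back to
            # itself would otherwise recurse forever.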
            already_seen = already_seen or set()
            join_tuple = tuple(getattr(self.query.alias_map[j], 'join_cols', None) for j in joins)
            if join_tuple in already_seen:
                raise FieldError('Infinite loop caused by ordering.')
            already_seen.add(join_tuple)

            results = []
            for item in opts.ordering:
                results.extend(self.find_ordering_name(item, opts, alias, order, already_seen))
            return results
        targets, alias, _ = self.query.trim_joins(targets, joins, path)
        return [(OrderBy(transform_function(t, alias), descending=descending), False) for t in targets]

    def _setup_joins(self, pieces, opts, alias):
        """
        Helper method for get_order_by() and get_distinct().

        get_ordering() and get_distinct() must produce the same target columns
        on the same input, as the prefixes of get_ordering() and get_distinct()
        must match. Executing SQL where this is not true is an error.
        """
        alias = alias or self.query.get_initial_alias()
        field, targets, opts, joins, path, transform_function = self.query.setup_joins(pieces, opts, alias)
        alias = joins[-1]
        return field, targets, alias, joins, path, opts, transform_function

    def get_from_clause(self):
        """
        Return a list of strings that are joined together to go after the
        "FROM" part of the query, as well as a list of any extra parameters
        that need to be included. Subclasses can override this to create a
        from-clause via a "select".

        This should only be called after any SQL construction methods that
        might change the tables that are needed. This means the select columns,
        ordering, and distinct must be done first.
        """
        result = []
        params = []
        for alias in tuple(self.query.alias_map):
            if not self.query.alias_refcount[alias]:
                continue
            try:
                from_clause = self.query.alias_map[alias]
            except KeyError:
                # Extra tables can end up in self.tables, but not in the
                # alias_map if they aren't in a join. That's OK. We skip them.
                continue
            clause_sql, clause_params = self.compile(from_clause)
            result.append(clause_sql)
            params.extend(clause_params)
        for t in self.query.extra_tables:
            alias, _ = self.query.table_alias(t)
            # Only add the alias if it's not already present (the table_alias()
            # call increments the refcount, so an alias refcount of one means
            # this is the only reference).
            if alias not in self.query.alias_map or self.query.alias_refcount[alias] == 1:
                result.append(', %s' % self.quote_name_unless_alias(alias))
        return result, params

    def get_related_selections(self, select, opts=None, root_alias=None, cur_depth=1,
                               requested=None, restricted=None):
        """
        Fill in the information needed for a select_related query. The current
        depth is measured as the number of connections away from the root model
        (for example, cur_depth=1 means we are looking at models with direct
        connections to the root model).
        """
        def _get_field_choices():
            direct_choices = (f.name for f in opts.fields if f.is_relation)
            reverse_choices = (
                f.field.related_query_name()
                for f in opts.related_objects if f.field.unique
            )
            return chain(direct_choices, reverse_choices, self.query._filtered_relations)

        related_klass_infos = []
        if not restricted and cur_depth > self.query.max_depth:
            # We've recursed far enough; bail out.
            return related_klass_infos

        if not opts:
            opts = self.query.get_meta()
            root_alias = self.query.get_initial_alias()
        only_load = self.query.get_loaded_field_names()

        # Setup for the case when only particular related fields should be
        # included in the related selection.
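        # For example, select_related('author__publisher') arrives here as
        # requested = {'author': {'publisher': {}}} (names are illustrative).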
fields_found = set() if requested is None: restricted = isinstance(self.query.select_related, dict) if restricted: requested = self.query.select_related def get_related_klass_infos(klass_info, related_klass_infos): klass_info['related_klass_infos'] = related_klass_infos for f in opts.fields: field_model = f.model._meta.concrete_model fields_found.add(f.name) if restricted: next = requested.get(f.name, {}) if not f.is_relation: # If a non-related field is used like a relation, # or if a single non-relational field is given. if next or f.name in requested: raise FieldError( "Non-relational field given in select_related: '%s'. " "Choices are: %s" % ( f.name, ", ".join(_get_field_choices()) or '(none)', ) ) else: next = False if not select_related_descend(f, restricted, requested, only_load.get(field_model)): continue klass_info = { 'model': f.remote_field.model, 'field': f, 'reverse': False, 'local_setter': f.set_cached_value, 'remote_setter': f.remote_field.set_cached_value if f.unique else lambda x, y: None, 'from_parent': False, } related_klass_infos.append(klass_info) select_fields = [] _, _, _, joins, _, _ = self.query.setup_joins( [f.name], opts, root_alias) alias = joins[-1] columns = self.get_default_columns(start_alias=alias, opts=f.remote_field.model._meta) for col in columns: select_fields.append(len(select)) select.append((col, None)) klass_info['select_fields'] = select_fields next_klass_infos = self.get_related_selections( select, f.remote_field.model._meta, alias, cur_depth + 1, next, restricted) get_related_klass_infos(klass_info, next_klass_infos) if restricted: related_fields = [ (o.field, o.related_model) for o in opts.related_objects if o.field.unique and not o.many_to_many ] for f, model in related_fields: if not select_related_descend(f, restricted, requested, only_load.get(model), reverse=True): continue related_field_name = f.related_query_name() fields_found.add(related_field_name) join_info = self.query.setup_joins([related_field_name], opts, root_alias) alias = join_info.joins[-1] from_parent = issubclass(model, opts.model) and model is not opts.model klass_info = { 'model': model, 'field': f, 'reverse': True, 'local_setter': f.remote_field.set_cached_value, 'remote_setter': f.set_cached_value, 'from_parent': from_parent, } related_klass_infos.append(klass_info) select_fields = [] columns = self.get_default_columns( start_alias=alias, opts=model._meta, from_parent=opts.model) for col in columns: select_fields.append(len(select)) select.append((col, None)) klass_info['select_fields'] = select_fields next = requested.get(f.related_query_name(), {}) next_klass_infos = self.get_related_selections( select, model._meta, alias, cur_depth + 1, next, restricted) get_related_klass_infos(klass_info, next_klass_infos) for name in list(requested): # Filtered relations work only on the topmost level. if cur_depth > 1: break if name in self.query._filtered_relations: fields_found.add(name) f, _, join_opts, joins, _, _ = self.query.setup_joins([name], opts, root_alias) model = join_opts.model alias = joins[-1] from_parent = issubclass(model, opts.model) and model is not opts.model def local_setter(obj, from_obj): # Set a reverse fk object when relation is non-empty. 
if from_obj: f.remote_field.set_cached_value(from_obj, obj) def remote_setter(obj, from_obj): setattr(from_obj, name, obj) klass_info = { 'model': model, 'field': f, 'reverse': True, 'local_setter': local_setter, 'remote_setter': remote_setter, 'from_parent': from_parent, } related_klass_infos.append(klass_info) select_fields = [] columns = self.get_default_columns( start_alias=alias, opts=model._meta, from_parent=opts.model, ) for col in columns: select_fields.append(len(select)) select.append((col, None)) klass_info['select_fields'] = select_fields next_requested = requested.get(name, {}) next_klass_infos = self.get_related_selections( select, opts=model._meta, root_alias=alias, cur_depth=cur_depth + 1, requested=next_requested, restricted=restricted, ) get_related_klass_infos(klass_info, next_klass_infos) fields_not_found = set(requested).difference(fields_found) if fields_not_found: invalid_fields = ("'%s'" % s for s in fields_not_found) raise FieldError( 'Invalid field name(s) given in select_related: %s. ' 'Choices are: %s' % ( ', '.join(invalid_fields), ', '.join(_get_field_choices()) or '(none)', ) ) return related_klass_infos def get_select_for_update_of_arguments(self): """ Return a quoted list of arguments for the SELECT FOR UPDATE OF part of the query. """ def _get_field_choices(): """Yield all allowed field paths in breadth-first search order.""" queue = collections.deque([(None, self.klass_info)]) while queue: parent_path, klass_info = queue.popleft() if parent_path is None: path = [] yield 'self' else: field = klass_info['field'] if klass_info['reverse']: field = field.remote_field path = parent_path + [field.name] yield LOOKUP_SEP.join(path) queue.extend( (path, klass_info) for klass_info in klass_info.get('related_klass_infos', []) ) result = [] invalid_names = [] for name in self.query.select_for_update_of: parts = [] if name == 'self' else name.split(LOOKUP_SEP) klass_info = self.klass_info for part in parts: for related_klass_info in klass_info.get('related_klass_infos', []): field = related_klass_info['field'] if related_klass_info['reverse']: field = field.remote_field if field.name == part: klass_info = related_klass_info break else: klass_info = None break if klass_info is None: invalid_names.append(name) continue select_index = klass_info['select_fields'][0] col = self.select[select_index][0] if self.connection.features.select_for_update_of_column: result.append(self.compile(col)[0]) else: result.append(self.quote_name_unless_alias(col.alias)) if invalid_names: raise FieldError( 'Invalid field name(s) given in select_for_update(of=(...)): %s. ' 'Only relational fields followed in the query are allowed. ' 'Choices are: %s.' % ( ', '.join(invalid_names), ', '.join(_get_field_choices()), ) ) return result def deferred_to_columns(self): """ Convert the self.deferred_loading data structure to mapping of table names to sets of column names which are to be loaded. Return the dictionary. 
""" columns = {} self.query.deferred_to_data(columns, self.query.get_loaded_field_names_cb) return columns def get_converters(self, expressions): converters = {} for i, expression in enumerate(expressions): if expression: backend_converters = self.connection.ops.get_db_converters(expression) field_converters = expression.get_db_converters(self.connection) if backend_converters or field_converters: converters[i] = (backend_converters + field_converters, expression) return converters def apply_converters(self, rows, converters): connection = self.connection converters = list(converters.items()) for row in map(list, rows): for pos, (convs, expression) in converters: value = row[pos] for converter in convs: value = converter(value, expression, connection) row[pos] = value yield row def results_iter(self, results=None, tuple_expected=False, chunked_fetch=False, chunk_size=GET_ITERATOR_CHUNK_SIZE): """Return an iterator over the results from executing this query.""" if results is None: results = self.execute_sql(MULTI, chunked_fetch=chunked_fetch, chunk_size=chunk_size) fields = [s[0] for s in self.select[0:self.col_count]] converters = self.get_converters(fields) rows = chain.from_iterable(results) if converters: rows = self.apply_converters(rows, converters) if tuple_expected: rows = map(tuple, rows) return rows def has_results(self): """ Backends (e.g. NoSQL) can override this in order to use optimized versions of "query has any results." """ # This is always executed on a query clone, so we can modify self.query self.query.add_extra({'a': 1}, None, None, None, None, None) self.query.set_extra_mask(['a']) return bool(self.execute_sql(SINGLE)) def execute_sql(self, result_type=MULTI, chunked_fetch=False, chunk_size=GET_ITERATOR_CHUNK_SIZE): """ Run the query against the database and return the result(s). The return value is a single data item if result_type is SINGLE, or an iterator over the results if the result_type is MULTI. result_type is either MULTI (use fetchmany() to retrieve all rows), SINGLE (only retrieve a single row), or None. In this last case, the cursor is returned if any query is executed, since it's used by subclasses such as InsertQuery). It's possible, however, that no query is needed, as the filters describe an empty set. In that case, None is returned, to avoid any unnecessary database interaction. """ result_type = result_type or NO_RESULTS try: sql, params = self.as_sql() if not sql: raise EmptyResultSet except EmptyResultSet: if result_type == MULTI: return iter([]) else: return if chunked_fetch: cursor = self.connection.chunked_cursor() else: cursor = self.connection.cursor() try: cursor.execute(sql, params) except Exception: # Might fail for server-side cursors (e.g. connection closed) cursor.close() raise if result_type == CURSOR: # Give the caller the cursor to process and close. return cursor if result_type == SINGLE: try: val = cursor.fetchone() if val: return val[0:self.col_count] return val finally: # done with the cursor cursor.close() if result_type == NO_RESULTS: cursor.close() return result = cursor_iter( cursor, self.connection.features.empty_fetchmany_value, self.col_count if self.has_extra_select else None, chunk_size, ) if not chunked_fetch or not self.connection.features.can_use_chunked_reads: try: # If we are using non-chunked reads, we return the same data # structure as normally, but ensure it is all read into memory # before going any further. Use chunked_fetch if requested, # unless the database doesn't support it. 
return list(result) finally: # done with the cursor cursor.close() return result def as_subquery_condition(self, alias, columns, compiler): qn = compiler.quote_name_unless_alias qn2 = self.connection.ops.quote_name for index, select_col in enumerate(self.query.select): lhs_sql, lhs_params = self.compile(select_col) rhs = '%s.%s' % (qn(alias), qn2(columns[index])) self.query.where.add( QueryWrapper('%s = %s' % (lhs_sql, rhs), lhs_params), 'AND') sql, params = self.as_sql() return 'EXISTS (%s)' % sql, params def explain_query(self): result = list(self.execute_sql()) # Some backends return 1 item tuples with strings, and others return # tuples with integers and strings. Flatten them out into strings. for row in result[0]: if not isinstance(row, str): yield ' '.join(str(c) for c in row) else: yield row class SQLInsertCompiler(SQLCompiler): return_id = False def field_as_sql(self, field, val): """ Take a field and a value intended to be saved on that field, and return placeholder SQL and accompanying params. Check for raw values, expressions, and fields with get_placeholder() defined in that order. When field is None, consider the value raw and use it as the placeholder, with no corresponding parameters returned. """ if field is None: # A field value of None means the value is raw. sql, params = val, [] elif hasattr(val, 'as_sql'): # This is an expression, let's compile it. sql, params = self.compile(val) elif hasattr(field, 'get_placeholder'): # Some fields (e.g. geo fields) need special munging before # they can be inserted. sql, params = field.get_placeholder(val, self, self.connection), [val] else: # Return the common case for the placeholder sql, params = '%s', [val] # The following hook is only used by Oracle Spatial, which sometimes # needs to yield 'NULL' and [] as its placeholder and params instead # of '%s' and [None]. The 'NULL' placeholder is produced earlier by # OracleOperations.get_geom_placeholder(). The following line removes # the corresponding None parameter. See ticket #10888. params = self.connection.ops.modify_insert_params(sql, params) return sql, params def prepare_value(self, field, value): """ Prepare a value to be used in a query by resolving it if it is an expression and otherwise calling the field's get_db_prep_save(). """ if hasattr(value, 'resolve_expression'): value = value.resolve_expression(self.query, allow_joins=False, for_save=True) # Don't allow values containing Col expressions. They refer to # existing columns on a row, but in the case of insert the row # doesn't exist yet. if value.contains_column_references: raise ValueError( 'Failed to insert expression "%s" on %s. F() expressions ' 'can only be used to update, not to insert.' % (value, field) ) if value.contains_aggregate: raise FieldError( 'Aggregate functions are not allowed in this query ' '(%s=%r).' % (field.name, value) ) if value.contains_over_clause: raise FieldError( 'Window expressions are not allowed in this query (%s=%r).' % (field.name, value) ) else: value = field.get_db_prep_save(value, connection=self.connection) return value def pre_save_val(self, field, obj): """ Get the given field's value off the given obj. pre_save() is used for things like auto_now on DateTimeField. Skip it if this is a raw query. """ if self.query.raw: return getattr(obj, field.attname) return field.pre_save(obj, add=True) def assemble_as_sql(self, fields, value_rows): """ Take a sequence of N fields and a sequence of M rows of values, and generate placeholder SQL and parameters for each field and value. 
Return a pair containing: * a sequence of M rows of N SQL placeholder strings, and * a sequence of M rows of corresponding parameter values. Each placeholder string may contain any number of '%s' interpolation strings, and each parameter row will contain exactly as many params as the total number of '%s's in the corresponding placeholder row. """ if not value_rows: return [], [] # list of (sql, [params]) tuples for each object to be saved # Shape: [n_objs][n_fields][2] rows_of_fields_as_sql = ( (self.field_as_sql(field, v) for field, v in zip(fields, row)) for row in value_rows ) # tuple like ([sqls], [[params]s]) for each object to be saved # Shape: [n_objs][2][n_fields] sql_and_param_pair_rows = (zip(*row) for row in rows_of_fields_as_sql) # Extract separate lists for placeholders and params. # Each of these has shape [n_objs][n_fields] placeholder_rows, param_rows = zip(*sql_and_param_pair_rows) # Params for each field are still lists, and need to be flattened. param_rows = [[p for ps in row for p in ps] for row in param_rows] return placeholder_rows, param_rows def as_sql(self): # We don't need quote_name_unless_alias() here, since these are all # going to be column names (so we can avoid the extra overhead). qn = self.connection.ops.quote_name opts = self.query.get_meta() insert_statement = self.connection.ops.insert_statement(ignore_conflicts=self.query.ignore_conflicts) result = ['%s %s' % (insert_statement, qn(opts.db_table))] fields = self.query.fields or [opts.pk] result.append('(%s)' % ', '.join(qn(f.column) for f in fields)) if self.query.fields: value_rows = [ [self.prepare_value(field, self.pre_save_val(field, obj)) for field in fields] for obj in self.query.objs ] else: # An empty object. value_rows = [[self.connection.ops.pk_default_value()] for _ in self.query.objs] fields = [None] # Currently the backends just accept values when generating bulk # queries and generate their own placeholders. Doing that isn't # necessary and it should be possible to use placeholders and # expressions in bulk inserts too. can_bulk = (not self.return_id and self.connection.features.has_bulk_insert) placeholder_rows, param_rows = self.assemble_as_sql(fields, value_rows) ignore_conflicts_suffix_sql = self.connection.ops.ignore_conflicts_suffix_sql( ignore_conflicts=self.query.ignore_conflicts ) if self.return_id and self.connection.features.can_return_columns_from_insert: if self.connection.features.can_return_rows_from_bulk_insert: result.append(self.connection.ops.bulk_insert_sql(fields, placeholder_rows)) params = param_rows else: result.append("VALUES (%s)" % ", ".join(placeholder_rows[0])) params = [param_rows[0]] if ignore_conflicts_suffix_sql: result.append(ignore_conflicts_suffix_sql) col = "%s.%s" % (qn(opts.db_table), qn(opts.pk.column)) r_fmt, r_params = self.connection.ops.return_insert_id(opts.pk) # Skip empty r_fmt to allow subclasses to customize behavior for # 3rd party backends. Refs #19096. 
if r_fmt: result.append(r_fmt % col) params += [r_params] return [(" ".join(result), tuple(chain.from_iterable(params)))] if can_bulk: result.append(self.connection.ops.bulk_insert_sql(fields, placeholder_rows)) if ignore_conflicts_suffix_sql: result.append(ignore_conflicts_suffix_sql) return [(" ".join(result), tuple(p for ps in param_rows for p in ps))] else: if ignore_conflicts_suffix_sql: result.append(ignore_conflicts_suffix_sql) return [ (" ".join(result + ["VALUES (%s)" % ", ".join(p)]), vals) for p, vals in zip(placeholder_rows, param_rows) ] def execute_sql(self, return_id=False): assert not ( return_id and len(self.query.objs) != 1 and not self.connection.features.can_return_rows_from_bulk_insert ) self.return_id = return_id with self.connection.cursor() as cursor: for sql, params in self.as_sql(): cursor.execute(sql, params) if not return_id: return if self.connection.features.can_return_rows_from_bulk_insert and len(self.query.objs) > 1: return self.connection.ops.fetch_returned_insert_ids(cursor) if self.connection.features.can_return_columns_from_insert: assert len(self.query.objs) == 1 return self.connection.ops.fetch_returned_insert_id(cursor) return self.connection.ops.last_insert_id( cursor, self.query.get_meta().db_table, self.query.get_meta().pk.column ) class SQLDeleteCompiler(SQLCompiler): def as_sql(self): """ Create the SQL for this query. Return the SQL string and list of parameters. """ assert len([t for t in self.query.alias_map if self.query.alias_refcount[t] > 0]) == 1, \ "Can only delete from one table at a time." qn = self.quote_name_unless_alias result = ['DELETE FROM %s' % qn(self.query.base_table)] where, params = self.compile(self.query.where) if where: result.append('WHERE %s' % where) return ' '.join(result), tuple(params) class SQLUpdateCompiler(SQLCompiler): def as_sql(self): """ Create the SQL for this query. Return the SQL string and list of parameters. """ self.pre_sql_setup() if not self.query.values: return '', () qn = self.quote_name_unless_alias values, update_params = [], [] for field, model, val in self.query.values: if hasattr(val, 'resolve_expression'): val = val.resolve_expression(self.query, allow_joins=False, for_save=True) if val.contains_aggregate: raise FieldError( 'Aggregate functions are not allowed in this query ' '(%s=%r).' % (field.name, val) ) if val.contains_over_clause: raise FieldError( 'Window expressions are not allowed in this query ' '(%s=%r).' % (field.name, val) ) elif hasattr(val, 'prepare_database_save'): if field.remote_field: val = field.get_db_prep_save( val.prepare_database_save(field), connection=self.connection, ) else: raise TypeError( "Tried to update field %s with a model instance, %r. " "Use a value compatible with %s." % (field, val, field.__class__.__name__) ) else: val = field.get_db_prep_save(val, connection=self.connection) # Getting the placeholder for the field. 
if hasattr(field, 'get_placeholder'): placeholder = field.get_placeholder(val, self, self.connection) else: placeholder = '%s' name = field.column if hasattr(val, 'as_sql'): sql, params = self.compile(val) values.append('%s = %s' % (qn(name), placeholder % sql)) update_params.extend(params) elif val is not None: values.append('%s = %s' % (qn(name), placeholder)) update_params.append(val) else: values.append('%s = NULL' % qn(name)) table = self.query.base_table result = [ 'UPDATE %s SET' % qn(table), ', '.join(values), ] where, params = self.compile(self.query.where) if where: result.append('WHERE %s' % where) return ' '.join(result), tuple(update_params + params) def execute_sql(self, result_type): """ Execute the specified update. Return the number of rows affected by the primary update query. The "primary update query" is the first non-empty query that is executed. Row counts for any subsequent, related queries are not available. """ cursor = super().execute_sql(result_type) try: rows = cursor.rowcount if cursor else 0 is_empty = cursor is None finally: if cursor: cursor.close() for query in self.query.get_related_updates(): aux_rows = query.get_compiler(self.using).execute_sql(result_type) if is_empty and aux_rows: rows = aux_rows is_empty = False return rows def pre_sql_setup(self): """ If the update depends on results from other tables, munge the "where" conditions to match the format required for (portable) SQL updates. If multiple updates are required, pull out the id values to update at this point so that they don't change as a result of the progressive updates. """ refcounts_before = self.query.alias_refcount.copy() # Ensure base table is in the query self.query.get_initial_alias() count = self.query.count_active_tables() if not self.query.related_updates and count == 1: return query = self.query.chain(klass=Query) query.select_related = False query.clear_ordering(True) query.extra = {} query.select = [] query.add_fields([query.get_meta().pk.name]) super().pre_sql_setup() must_pre_select = count > 1 and not self.connection.features.update_can_self_select # Now we adjust the current query: reset the where clause and get rid # of all the tables we don't need (since they're in the sub-select). self.query.where = self.query.where_class() if self.query.related_updates or must_pre_select: # Either we're using the idents in multiple update queries (so # don't want them to change), or the db backend doesn't support # selecting from the updating table (e.g. MySQL). idents = [] for rows in query.get_compiler(self.using).execute_sql(MULTI): idents.extend(r[0] for r in rows) self.query.add_filter(('pk__in', idents)) self.query.related_ids = idents else: # The fast path. Filters and updates in one query. self.query.add_filter(('pk__in', query)) self.query.reset_refcounts(refcounts_before) class SQLAggregateCompiler(SQLCompiler): def as_sql(self): """ Create the SQL for this query. Return the SQL string and list of parameters. """ sql, params = [], [] for annotation in self.query.annotation_select.values(): ann_sql, ann_params = self.compile(annotation, select_format=FORCE) sql.append(ann_sql) params.extend(ann_params) self.col_count = len(self.query.annotation_select) sql = ', '.join(sql) params = tuple(params) sql = 'SELECT %s FROM (%s) subquery' % (sql, self.query.subquery) params = params + self.query.sub_params return sql, params def cursor_iter(cursor, sentinel, col_count, itersize): """ Yield blocks of rows from a cursor and ensure the cursor is closed when done. 
""" try: for rows in iter((lambda: cursor.fetchmany(itersize)), sentinel): yield rows if col_count is None else [r[:col_count] for r in rows] finally: cursor.close()
import datetime import re import uuid from functools import lru_cache from django.conf import settings from django.db.backends.base.operations import BaseDatabaseOperations from django.db.backends.utils import strip_quotes, truncate_name from django.db.utils import DatabaseError from django.utils import timezone from django.utils.encoding import force_bytes, force_str from django.utils.functional import cached_property from .base import Database from .utils import BulkInsertMapper, InsertVar, Oracle_datetime class DatabaseOperations(BaseDatabaseOperations): # Oracle uses NUMBER(11) and NUMBER(19) for integer fields. integer_field_ranges = { 'SmallIntegerField': (-99999999999, 99999999999), 'IntegerField': (-99999999999, 99999999999), 'BigIntegerField': (-9999999999999999999, 9999999999999999999), 'PositiveSmallIntegerField': (0, 99999999999), 'PositiveIntegerField': (0, 99999999999), } set_operators = {**BaseDatabaseOperations.set_operators, 'difference': 'MINUS'} # TODO: colorize this SQL code with style.SQL_KEYWORD(), etc. _sequence_reset_sql = """ DECLARE table_value integer; seq_value integer; seq_name user_tab_identity_cols.sequence_name%%TYPE; BEGIN BEGIN SELECT sequence_name INTO seq_name FROM user_tab_identity_cols WHERE table_name = '%(table_name)s' AND column_name = '%(column_name)s'; EXCEPTION WHEN NO_DATA_FOUND THEN seq_name := '%(no_autofield_sequence_name)s'; END; SELECT NVL(MAX(%(column)s), 0) INTO table_value FROM %(table)s; SELECT NVL(last_number - cache_size, 0) INTO seq_value FROM user_sequences WHERE sequence_name = seq_name; WHILE table_value > seq_value LOOP EXECUTE IMMEDIATE 'SELECT "'||seq_name||'".nextval FROM DUAL' INTO seq_value; END LOOP; END; /""" # Oracle doesn't support string without precision; use the max string size. cast_char_field_without_max_length = 'NVARCHAR2(2000)' cast_data_types = { 'AutoField': 'NUMBER(11)', 'BigAutoField': 'NUMBER(19)', 'TextField': cast_char_field_without_max_length, } def cache_key_culling_sql(self): return 'SELECT cache_key FROM %s ORDER BY cache_key OFFSET %%s ROWS FETCH FIRST 1 ROWS ONLY' def date_extract_sql(self, lookup_type, field_name): if lookup_type == 'week_day': # TO_CHAR(field, 'D') returns an integer from 1-7, where 1=Sunday. return "TO_CHAR(%s, 'D')" % field_name elif lookup_type == 'week': # IW = ISO week number return "TO_CHAR(%s, 'IW')" % field_name elif lookup_type == 'quarter': return "TO_CHAR(%s, 'Q')" % field_name elif lookup_type == 'iso_year': return "TO_CHAR(%s, 'IYYY')" % field_name else: # https://docs.oracle.com/en/database/oracle/oracle-database/18/sqlrf/EXTRACT-datetime.html return "EXTRACT(%s FROM %s)" % (lookup_type.upper(), field_name) def date_trunc_sql(self, lookup_type, field_name): # https://docs.oracle.com/en/database/oracle/oracle-database/18/sqlrf/ROUND-and-TRUNC-Date-Functions.html if lookup_type in ('year', 'month'): return "TRUNC(%s, '%s')" % (field_name, lookup_type.upper()) elif lookup_type == 'quarter': return "TRUNC(%s, 'Q')" % field_name elif lookup_type == 'week': return "TRUNC(%s, 'IW')" % field_name else: return "TRUNC(%s)" % field_name # Oracle crashes with "ORA-03113: end-of-file on communication channel" # if the time zone name is passed in parameter. Use interpolation instead. # https://groups.google.com/forum/#!msg/django-developers/zwQju7hbG78/9l934yelwfsJ # This regexp matches all time zone names from the zoneinfo database. 
_tzname_re = re.compile(r'^[\w/:+-]+$') def _prepare_tzname_delta(self, tzname): if '+' in tzname: return tzname[tzname.find('+'):] elif '-' in tzname: return tzname[tzname.find('-'):] return tzname def _convert_field_to_tz(self, field_name, tzname): if not settings.USE_TZ: return field_name if not self._tzname_re.match(tzname): raise ValueError("Invalid time zone name: %s" % tzname) # Convert from connection timezone to the local time, returning # TIMESTAMP WITH TIME ZONE and cast it back to TIMESTAMP to strip the # TIME ZONE details. if self.connection.timezone_name != tzname: return "CAST((FROM_TZ(%s, '%s') AT TIME ZONE '%s') AS TIMESTAMP)" % ( field_name, self.connection.timezone_name, self._prepare_tzname_delta(tzname), ) return field_name def datetime_cast_date_sql(self, field_name, tzname): field_name = self._convert_field_to_tz(field_name, tzname) return 'TRUNC(%s)' % field_name def datetime_cast_time_sql(self, field_name, tzname): # Since `TimeField` values are stored as TIMESTAMP where only the date # part is ignored, convert the field to the specified timezone. return self._convert_field_to_tz(field_name, tzname) def datetime_extract_sql(self, lookup_type, field_name, tzname): field_name = self._convert_field_to_tz(field_name, tzname) return self.date_extract_sql(lookup_type, field_name) def datetime_trunc_sql(self, lookup_type, field_name, tzname): field_name = self._convert_field_to_tz(field_name, tzname) # https://docs.oracle.com/en/database/oracle/oracle-database/18/sqlrf/ROUND-and-TRUNC-Date-Functions.html if lookup_type in ('year', 'month'): sql = "TRUNC(%s, '%s')" % (field_name, lookup_type.upper()) elif lookup_type == 'quarter': sql = "TRUNC(%s, 'Q')" % field_name elif lookup_type == 'week': sql = "TRUNC(%s, 'IW')" % field_name elif lookup_type == 'day': sql = "TRUNC(%s)" % field_name elif lookup_type == 'hour': sql = "TRUNC(%s, 'HH24')" % field_name elif lookup_type == 'minute': sql = "TRUNC(%s, 'MI')" % field_name else: sql = "CAST(%s AS DATE)" % field_name # Cast to DATE removes sub-second precision. return sql def time_trunc_sql(self, lookup_type, field_name): # The implementation is similar to `datetime_trunc_sql` as both # `DateTimeField` and `TimeField` are stored as TIMESTAMP where # the date part of the later is ignored. if lookup_type == 'hour': sql = "TRUNC(%s, 'HH24')" % field_name elif lookup_type == 'minute': sql = "TRUNC(%s, 'MI')" % field_name elif lookup_type == 'second': sql = "CAST(%s AS DATE)" % field_name # Cast to DATE removes sub-second precision. return sql def get_db_converters(self, expression): converters = super().get_db_converters(expression) internal_type = expression.output_field.get_internal_type() if internal_type == 'TextField': converters.append(self.convert_textfield_value) elif internal_type == 'BinaryField': converters.append(self.convert_binaryfield_value) elif internal_type in ['BooleanField', 'NullBooleanField']: converters.append(self.convert_booleanfield_value) elif internal_type == 'DateTimeField': if settings.USE_TZ: converters.append(self.convert_datetimefield_value) elif internal_type == 'DateField': converters.append(self.convert_datefield_value) elif internal_type == 'TimeField': converters.append(self.convert_timefield_value) elif internal_type == 'UUIDField': converters.append(self.convert_uuidfield_value) # Oracle stores empty strings as null. If the field accepts the empty # string, undo this to adhere to the Django convention of using # the empty string instead of null. 
if expression.field.empty_strings_allowed: converters.append( self.convert_empty_bytes if internal_type == 'BinaryField' else self.convert_empty_string ) return converters def convert_textfield_value(self, value, expression, connection): if isinstance(value, Database.LOB): value = value.read() return value def convert_binaryfield_value(self, value, expression, connection): if isinstance(value, Database.LOB): value = force_bytes(value.read()) return value def convert_booleanfield_value(self, value, expression, connection): if value in (0, 1): value = bool(value) return value # cx_Oracle always returns datetime.datetime objects for # DATE and TIMESTAMP columns, but Django wants to see a # python datetime.date, .time, or .datetime. def convert_datetimefield_value(self, value, expression, connection): if value is not None: value = timezone.make_aware(value, self.connection.timezone) return value def convert_datefield_value(self, value, expression, connection): if isinstance(value, Database.Timestamp): value = value.date() return value def convert_timefield_value(self, value, expression, connection): if isinstance(value, Database.Timestamp): value = value.time() return value def convert_uuidfield_value(self, value, expression, connection): if value is not None: value = uuid.UUID(value) return value @staticmethod def convert_empty_string(value, expression, connection): return '' if value is None else value @staticmethod def convert_empty_bytes(value, expression, connection): return b'' if value is None else value def deferrable_sql(self): return " DEFERRABLE INITIALLY DEFERRED" def fetch_returned_insert_id(self, cursor): value = cursor._insert_id_var.getvalue() if value is None or value == []: # cx_Oracle < 6.3 returns None, >= 6.3 returns empty list. raise DatabaseError( 'The database did not return a new row id. Probably "ORA-1403: ' 'no data found" was raised internally but was hidden by the ' 'Oracle OCI library (see https://code.djangoproject.com/ticket/28859).' ) # cx_Oracle < 7 returns value, >= 7 returns list with single value. return value[0] if isinstance(value, list) else value def field_cast_sql(self, db_type, internal_type): if db_type and db_type.endswith('LOB'): return "DBMS_LOB.SUBSTR(%s)" else: return "%s" def no_limit_value(self): return None def limit_offset_sql(self, low_mark, high_mark): fetch, offset = self._get_limit_offset_params(low_mark, high_mark) return ' '.join(sql for sql in ( ('OFFSET %d ROWS' % offset) if offset else None, ('FETCH FIRST %d ROWS ONLY' % fetch) if fetch else None, ) if sql) def last_executed_query(self, cursor, sql, params): # https://cx-oracle.readthedocs.io/en/latest/cursor.html#Cursor.statement # The DB API definition does not define this attribute. statement = cursor.statement # Unlike Psycopg's `query` and MySQLdb`'s `_executed`, cx_Oracle's # `statement` doesn't contain the query parameters. Substitute # parameters manually. 
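        # Note: this substitution is a best-effort reconstruction intended for
        # debugging output only; the values are interpolated as plain text
        # without re-quoting, so the result is not guaranteed to be valid,
        # executable SQL.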
        if isinstance(params, (tuple, list)):
            for i, param in enumerate(params):
                statement = statement.replace(':arg%d' % i, force_str(param, errors='replace'))
        elif isinstance(params, dict):
            for key, param in params.items():
                statement = statement.replace(':%s' % key, force_str(param, errors='replace'))
        return statement

    def last_insert_id(self, cursor, table_name, pk_name):
        sq_name = self._get_sequence_name(cursor, strip_quotes(table_name), pk_name)
        cursor.execute('SELECT "%s".currval FROM dual' % sq_name)
        return cursor.fetchone()[0]

    def lookup_cast(self, lookup_type, internal_type=None):
        if lookup_type in ('iexact', 'icontains', 'istartswith', 'iendswith'):
            return "UPPER(%s)"
        return "%s"

    def max_in_list_size(self):
        return 1000

    def max_name_length(self):
        return 30

    def pk_default_value(self):
        return "NULL"

    def prep_for_iexact_query(self, x):
        return x

    def process_clob(self, value):
        if value is None:
            return ''
        return value.read()

    def quote_name(self, name):
        # SQL92 requires delimited (quoted) names to be case-sensitive. When
        # not quoted, Oracle has case-insensitive behavior for identifiers, but
        # always defaults to uppercase.
        # We simplify things by making Oracle identifiers always uppercase.
        if not name.startswith('"') and not name.endswith('"'):
            name = '"%s"' % truncate_name(name.upper(), self.max_name_length())
        # Oracle puts the query text into a (query % args) construct, so % signs
        # in names need to be escaped. The '%%' will be collapsed back to '%' at
        # that stage so we aren't really making the name longer here.
        name = name.replace('%', '%%')
        return name.upper()

    def random_function_sql(self):
        return "DBMS_RANDOM.RANDOM"

    def regex_lookup(self, lookup_type):
        if lookup_type == 'regex':
            match_option = "'c'"
        else:
            match_option = "'i'"
        return 'REGEXP_LIKE(%%s, %%s, %s)' % match_option

    def return_insert_id(self, field):
        return 'RETURNING %s INTO %%s', (InsertVar(field),)

    def __foreign_key_constraints(self, table_name, recursive):
        with self.connection.cursor() as cursor:
            if recursive:
                cursor.execute("""
                    SELECT user_tables.table_name, rcons.constraint_name
                    FROM user_tables
                    JOIN user_constraints cons
                        ON (user_tables.table_name = cons.table_name AND cons.constraint_type = ANY('P', 'U'))
                    LEFT JOIN user_constraints rcons
                        ON (user_tables.table_name = rcons.table_name AND rcons.constraint_type = 'R')
                    START WITH user_tables.table_name = UPPER(%s)
                    CONNECT BY NOCYCLE PRIOR cons.constraint_name = rcons.r_constraint_name
                    GROUP BY user_tables.table_name, rcons.constraint_name
                    HAVING user_tables.table_name != UPPER(%s)
                    ORDER BY MAX(level) DESC
                """, (table_name, table_name))
            else:
                cursor.execute("""
                    SELECT cons.table_name, cons.constraint_name
                    FROM user_constraints cons
                    WHERE cons.constraint_type = 'R'
                    AND cons.table_name = UPPER(%s)
                """, (table_name,))
            return cursor.fetchall()

    @cached_property
    def _foreign_key_constraints(self):
        # 512 is large enough to fit the ~330 tables (as of this writing) in
        # Django's test suite.
        return lru_cache(maxsize=512)(self.__foreign_key_constraints)

    def sql_flush(self, style, tables, sequences, allow_cascade=False):
        if tables:
            truncated_tables = {table.upper() for table in tables}
            constraints = set()
            # Oracle's TRUNCATE CASCADE only works with ON DELETE CASCADE
            # foreign keys which Django doesn't define. Emulate the
            # PostgreSQL behavior which truncates all dependent tables by
            # manually retrieving all foreign key constraints and resolving
            # dependencies.
for table in tables: for foreign_table, constraint in self._foreign_key_constraints(table, recursive=allow_cascade): if allow_cascade: truncated_tables.add(foreign_table) constraints.add((foreign_table, constraint)) sql = [ "%s %s %s %s %s %s %s %s;" % ( style.SQL_KEYWORD('ALTER'), style.SQL_KEYWORD('TABLE'), style.SQL_FIELD(self.quote_name(table)), style.SQL_KEYWORD('DISABLE'), style.SQL_KEYWORD('CONSTRAINT'), style.SQL_FIELD(self.quote_name(constraint)), style.SQL_KEYWORD('KEEP'), style.SQL_KEYWORD('INDEX'), ) for table, constraint in constraints ] + [ "%s %s %s;" % ( style.SQL_KEYWORD('TRUNCATE'), style.SQL_KEYWORD('TABLE'), style.SQL_FIELD(self.quote_name(table)), ) for table in truncated_tables ] + [ "%s %s %s %s %s %s;" % ( style.SQL_KEYWORD('ALTER'), style.SQL_KEYWORD('TABLE'), style.SQL_FIELD(self.quote_name(table)), style.SQL_KEYWORD('ENABLE'), style.SQL_KEYWORD('CONSTRAINT'), style.SQL_FIELD(self.quote_name(constraint)), ) for table, constraint in constraints ] # Since we've just deleted all the rows, running our sequence # ALTER code will reset the sequence to 0. sql.extend(self.sequence_reset_by_name_sql(style, sequences)) return sql else: return [] def sequence_reset_by_name_sql(self, style, sequences): sql = [] for sequence_info in sequences: no_autofield_sequence_name = self._get_no_autofield_sequence_name(sequence_info['table']) table = self.quote_name(sequence_info['table']) column = self.quote_name(sequence_info['column'] or 'id') query = self._sequence_reset_sql % { 'no_autofield_sequence_name': no_autofield_sequence_name, 'table': table, 'column': column, 'table_name': strip_quotes(table), 'column_name': strip_quotes(column), } sql.append(query) return sql def sequence_reset_sql(self, style, model_list): from django.db import models output = [] query = self._sequence_reset_sql for model in model_list: for f in model._meta.local_fields: if isinstance(f, models.AutoField): no_autofield_sequence_name = self._get_no_autofield_sequence_name(model._meta.db_table) table = self.quote_name(model._meta.db_table) column = self.quote_name(f.column) output.append(query % { 'no_autofield_sequence_name': no_autofield_sequence_name, 'table': table, 'column': column, 'table_name': strip_quotes(table), 'column_name': strip_quotes(column), }) # Only one AutoField is allowed per model, so don't # continue to loop break for f in model._meta.many_to_many: if not f.remote_field.through: no_autofield_sequence_name = self._get_no_autofield_sequence_name(f.m2m_db_table()) table = self.quote_name(f.m2m_db_table()) column = self.quote_name('id') output.append(query % { 'no_autofield_sequence_name': no_autofield_sequence_name, 'table': table, 'column': column, 'table_name': strip_quotes(table), 'column_name': 'ID', }) return output def start_transaction_sql(self): return '' def tablespace_sql(self, tablespace, inline=False): if inline: return "USING INDEX TABLESPACE %s" % self.quote_name(tablespace) else: return "TABLESPACE %s" % self.quote_name(tablespace) def adapt_datefield_value(self, value): """ Transform a date value to an object compatible with what is expected by the backend driver for date columns. The default implementation transforms the date to text, but that is not necessary for Oracle. """ return value def adapt_datetimefield_value(self, value): """ Transform a datetime value to an object compatible with what is expected by the backend driver for datetime columns. If naive datetime is passed assumes that is in UTC. 
Normally Django models.DateTimeField makes sure that if USE_TZ is True passed datetime is timezone aware. """ if value is None: return None # Expression values are adapted by the database. if hasattr(value, 'resolve_expression'): return value # cx_Oracle doesn't support tz-aware datetimes if timezone.is_aware(value): if settings.USE_TZ: value = timezone.make_naive(value, self.connection.timezone) else: raise ValueError("Oracle backend does not support timezone-aware datetimes when USE_TZ is False.") return Oracle_datetime.from_datetime(value) def adapt_timefield_value(self, value): if value is None: return None # Expression values are adapted by the database. if hasattr(value, 'resolve_expression'): return value if isinstance(value, str): return datetime.datetime.strptime(value, '%H:%M:%S') # Oracle doesn't support tz-aware times if timezone.is_aware(value): raise ValueError("Oracle backend does not support timezone-aware times.") return Oracle_datetime(1900, 1, 1, value.hour, value.minute, value.second, value.microsecond) def combine_expression(self, connector, sub_expressions): lhs, rhs = sub_expressions if connector == '%%': return 'MOD(%s)' % ','.join(sub_expressions) elif connector == '&': return 'BITAND(%s)' % ','.join(sub_expressions) elif connector == '|': return 'BITAND(-%(lhs)s-1,%(rhs)s)+%(lhs)s' % {'lhs': lhs, 'rhs': rhs} elif connector == '<<': return '(%(lhs)s * POWER(2, %(rhs)s))' % {'lhs': lhs, 'rhs': rhs} elif connector == '>>': return 'FLOOR(%(lhs)s / POWER(2, %(rhs)s))' % {'lhs': lhs, 'rhs': rhs} elif connector == '^': return 'POWER(%s)' % ','.join(sub_expressions) return super().combine_expression(connector, sub_expressions) def _get_no_autofield_sequence_name(self, table): """ Manually created sequence name to keep backward compatibility for AutoFields that aren't Oracle identity columns. """ name_length = self.max_name_length() - 3 return '%s_SQ' % truncate_name(strip_quotes(table), name_length).upper() def _get_sequence_name(self, cursor, table, pk_name): cursor.execute(""" SELECT sequence_name FROM user_tab_identity_cols WHERE table_name = UPPER(%s) AND column_name = UPPER(%s)""", [table, pk_name]) row = cursor.fetchone() return self._get_no_autofield_sequence_name(table) if row is None else row[0] def bulk_insert_sql(self, fields, placeholder_rows): query = [] for row in placeholder_rows: select = [] for i, placeholder in enumerate(row): # A model without any fields has fields=[None]. if fields[i]: internal_type = getattr(fields[i], 'target_field', fields[i]).get_internal_type() placeholder = BulkInsertMapper.types.get(internal_type, '%s') % placeholder # Add columns aliases to the first select to avoid "ORA-00918: # column ambiguously defined" when two or more columns in the # first select have the same value. if not query: placeholder = '%s col_%s' % (placeholder, i) select.append(placeholder) query.append('SELECT %s FROM DUAL' % ', '.join(select)) # Bulk insert to tables with Oracle identity columns causes Oracle to # add sequence.nextval to it. Sequence.nextval cannot be used with the # UNION operator. To prevent incorrect SQL, move UNION to a subquery. 
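        # For example, two rows of two plain (untyped) fields produce:
        #   SELECT * FROM (SELECT %s col_0, %s col_1 FROM DUAL
        #                  UNION ALL SELECT %s, %s FROM DUAL)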
return 'SELECT * FROM (%s)' % ' UNION ALL '.join(query) def subtract_temporals(self, internal_type, lhs, rhs): if internal_type == 'DateField': lhs_sql, lhs_params = lhs rhs_sql, rhs_params = rhs return "NUMTODSINTERVAL(TO_NUMBER(%s - %s), 'DAY')" % (lhs_sql, rhs_sql), lhs_params + rhs_params return super().subtract_temporals(internal_type, lhs, rhs) def bulk_batch_size(self, fields, objs): """Oracle restricts the number of parameters in a query.""" if fields: return self.connection.features.max_query_params // len(fields) return len(objs)
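

# A standalone sanity check (illustrative only, not part of Django) for the
# bitwise emulations used in combine_expression() above. Oracle lacks native
# |, << and >> operators, so they are rewritten in terms of BITAND and POWER.
# In Python terms, BITAND(-a - 1, b) + a is (~a & b) + a, which equals a | b
# for non-negative integers because ~a & b contributes only the bits missing
# from a, so the addition never carries.
if __name__ == '__main__':
    for a in range(64):
        for b in range(16):
            assert ((-a - 1) & b) + a == a | b  # '|'  -> BITAND(-lhs-1, rhs) + lhs
            assert a * 2 ** b == a << b         # '<<' -> lhs * POWER(2, rhs)
            assert a // 2 ** b == a >> b        # '>>' -> FLOOR(lhs / POWER(2, rhs))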
from psycopg2.extras import Inet from django.conf import settings from django.db import NotSupportedError from django.db.backends.base.operations import BaseDatabaseOperations class DatabaseOperations(BaseDatabaseOperations): cast_char_field_without_max_length = 'varchar' explain_prefix = 'EXPLAIN' cast_data_types = { 'AutoField': 'integer', 'BigAutoField': 'bigint', } def unification_cast_sql(self, output_field): internal_type = output_field.get_internal_type() if internal_type in ("GenericIPAddressField", "IPAddressField", "TimeField", "UUIDField"): # PostgreSQL will resolve a union as type 'text' if input types are # 'unknown'. # https://www.postgresql.org/docs/current/typeconv-union-case.html # These fields cannot be implicitly cast back in the default # PostgreSQL configuration so we need to explicitly cast them. # We must also remove components of the type within brackets: # varchar(255) -> varchar. return 'CAST(%%s AS %s)' % output_field.db_type(self.connection).split('(')[0] return '%s' def date_extract_sql(self, lookup_type, field_name): # https://www.postgresql.org/docs/current/functions-datetime.html#FUNCTIONS-DATETIME-EXTRACT if lookup_type == 'week_day': # For consistency across backends, we return Sunday=1, Saturday=7. return "EXTRACT('dow' FROM %s) + 1" % field_name elif lookup_type == 'iso_year': return "EXTRACT('isoyear' FROM %s)" % field_name else: return "EXTRACT('%s' FROM %s)" % (lookup_type, field_name) def date_trunc_sql(self, lookup_type, field_name): # https://www.postgresql.org/docs/current/functions-datetime.html#FUNCTIONS-DATETIME-TRUNC return "DATE_TRUNC('%s', %s)" % (lookup_type, field_name) def _prepare_tzname_delta(self, tzname): if '+' in tzname: return tzname.replace('+', '-') elif '-' in tzname: return tzname.replace('-', '+') return tzname def _convert_field_to_tz(self, field_name, tzname): if settings.USE_TZ: field_name = "%s AT TIME ZONE '%s'" % (field_name, self._prepare_tzname_delta(tzname)) return field_name def datetime_cast_date_sql(self, field_name, tzname): field_name = self._convert_field_to_tz(field_name, tzname) return '(%s)::date' % field_name def datetime_cast_time_sql(self, field_name, tzname): field_name = self._convert_field_to_tz(field_name, tzname) return '(%s)::time' % field_name def datetime_extract_sql(self, lookup_type, field_name, tzname): field_name = self._convert_field_to_tz(field_name, tzname) return self.date_extract_sql(lookup_type, field_name) def datetime_trunc_sql(self, lookup_type, field_name, tzname): field_name = self._convert_field_to_tz(field_name, tzname) # https://www.postgresql.org/docs/current/functions-datetime.html#FUNCTIONS-DATETIME-TRUNC return "DATE_TRUNC('%s', %s)" % (lookup_type, field_name) def time_trunc_sql(self, lookup_type, field_name): return "DATE_TRUNC('%s', %s)::time" % (lookup_type, field_name) def deferrable_sql(self): return " DEFERRABLE INITIALLY DEFERRED" def fetch_returned_insert_ids(self, cursor): """ Given a cursor object that has just performed an INSERT...RETURNING statement into a table that has an auto-incrementing ID, return the list of newly created IDs. 
""" return [item[0] for item in cursor.fetchall()] def lookup_cast(self, lookup_type, internal_type=None): lookup = '%s' # Cast text lookups to text to allow things like filter(x__contains=4) if lookup_type in ('iexact', 'contains', 'icontains', 'startswith', 'istartswith', 'endswith', 'iendswith', 'regex', 'iregex'): if internal_type in ('IPAddressField', 'GenericIPAddressField'): lookup = "HOST(%s)" elif internal_type in ('CICharField', 'CIEmailField', 'CITextField'): lookup = '%s::citext' else: lookup = "%s::text" # Use UPPER(x) for case-insensitive lookups; it's faster. if lookup_type in ('iexact', 'icontains', 'istartswith', 'iendswith'): lookup = 'UPPER(%s)' % lookup return lookup def no_limit_value(self): return None def prepare_sql_script(self, sql): return [sql] def quote_name(self, name): if name.startswith('"') and name.endswith('"'): return name # Quoting once is enough. return '"%s"' % name def set_time_zone_sql(self): return "SET TIME ZONE %s" def sql_flush(self, style, tables, sequences, allow_cascade=False): if tables: # Perform a single SQL 'TRUNCATE x, y, z...;' statement. It allows # us to truncate tables referenced by a foreign key in any other # table. tables_sql = ', '.join( style.SQL_FIELD(self.quote_name(table)) for table in tables) if allow_cascade: sql = ['%s %s %s;' % ( style.SQL_KEYWORD('TRUNCATE'), tables_sql, style.SQL_KEYWORD('CASCADE'), )] else: sql = ['%s %s;' % ( style.SQL_KEYWORD('TRUNCATE'), tables_sql, )] sql.extend(self.sequence_reset_by_name_sql(style, sequences)) return sql else: return [] def sequence_reset_by_name_sql(self, style, sequences): # 'ALTER SEQUENCE sequence_name RESTART WITH 1;'... style SQL statements # to reset sequence indices sql = [] for sequence_info in sequences: table_name = sequence_info['table'] # 'id' will be the case if it's an m2m using an autogenerated # intermediate table (see BaseDatabaseIntrospection.sequence_list). column_name = sequence_info['column'] or 'id' sql.append("%s setval(pg_get_serial_sequence('%s','%s'), 1, false);" % ( style.SQL_KEYWORD('SELECT'), style.SQL_TABLE(self.quote_name(table_name)), style.SQL_FIELD(column_name), )) return sql def tablespace_sql(self, tablespace, inline=False): if inline: return "USING INDEX TABLESPACE %s" % self.quote_name(tablespace) else: return "TABLESPACE %s" % self.quote_name(tablespace) def sequence_reset_sql(self, style, model_list): from django.db import models output = [] qn = self.quote_name for model in model_list: # Use `coalesce` to set the sequence for each model to the max pk value if there are records, # or 1 if there are none. Set the `is_called` property (the third argument to `setval`) to true # if there are records (as the max pk value is already in use), otherwise set it to false. # Use pg_get_serial_sequence to get the underlying sequence name from the table name # and column name (available since PostgreSQL 8) for f in model._meta.local_fields: if isinstance(f, models.AutoField): output.append( "%s setval(pg_get_serial_sequence('%s','%s'), " "coalesce(max(%s), 1), max(%s) %s null) %s %s;" % ( style.SQL_KEYWORD('SELECT'), style.SQL_TABLE(qn(model._meta.db_table)), style.SQL_FIELD(f.column), style.SQL_FIELD(qn(f.column)), style.SQL_FIELD(qn(f.column)), style.SQL_KEYWORD('IS NOT'), style.SQL_KEYWORD('FROM'), style.SQL_TABLE(qn(model._meta.db_table)), ) ) break # Only one AutoField is allowed per model, so don't bother continuing. 
for f in model._meta.many_to_many: if not f.remote_field.through: output.append( "%s setval(pg_get_serial_sequence('%s','%s'), " "coalesce(max(%s), 1), max(%s) %s null) %s %s;" % ( style.SQL_KEYWORD('SELECT'), style.SQL_TABLE(qn(f.m2m_db_table())), style.SQL_FIELD('id'), style.SQL_FIELD(qn('id')), style.SQL_FIELD(qn('id')), style.SQL_KEYWORD('IS NOT'), style.SQL_KEYWORD('FROM'), style.SQL_TABLE(qn(f.m2m_db_table())) ) ) return output def prep_for_iexact_query(self, x): return x def max_name_length(self): """ Return the maximum length of an identifier. The maximum length of an identifier is 63 by default, but can be changed by recompiling PostgreSQL after editing the NAMEDATALEN macro in src/include/pg_config_manual.h. This implementation returns 63, but can be overridden by a custom database backend that inherits most of its behavior from this one. """ return 63 def distinct_sql(self, fields, params): if fields: params = [param for param_list in params for param in param_list] return (['DISTINCT ON (%s)' % ', '.join(fields)], params) else: return ['DISTINCT'], [] def last_executed_query(self, cursor, sql, params): # http://initd.org/psycopg/docs/cursor.html#cursor.query # The query attribute is a Psycopg extension to the DB API 2.0. if cursor.query is not None: return cursor.query.decode() return None def return_insert_id(self, field): return "RETURNING %s", () def bulk_insert_sql(self, fields, placeholder_rows): placeholder_rows_sql = (", ".join(row) for row in placeholder_rows) values_sql = ", ".join("(%s)" % sql for sql in placeholder_rows_sql) return "VALUES " + values_sql def adapt_datefield_value(self, value): return value def adapt_datetimefield_value(self, value): return value def adapt_timefield_value(self, value): return value def adapt_ipaddressfield_value(self, value): if value: return Inet(value) return None def subtract_temporals(self, internal_type, lhs, rhs): if internal_type == 'DateField': lhs_sql, lhs_params = lhs rhs_sql, rhs_params = rhs return "(interval '1 day' * (%s - %s))" % (lhs_sql, rhs_sql), lhs_params + rhs_params return super().subtract_temporals(internal_type, lhs, rhs) def window_frame_range_start_end(self, start=None, end=None): start_, end_ = super().window_frame_range_start_end(start, end) if (start and start < 0) or (end and end > 0): raise NotSupportedError( 'PostgreSQL only supports UNBOUNDED together with PRECEDING ' 'and FOLLOWING.' ) return start_, end_ def explain_query_prefix(self, format=None, **options): prefix = super().explain_query_prefix(format) extra = {} if format: extra['FORMAT'] = format if options: extra.update({ name.upper(): 'true' if value else 'false' for name, value in options.items() }) if extra: prefix += ' (%s)' % ', '.join('%s %s' % i for i in extra.items()) return prefix def ignore_conflicts_suffix_sql(self, ignore_conflicts=None): return 'ON CONFLICT DO NOTHING' if ignore_conflicts else super().ignore_conflicts_suffix_sql(ignore_conflicts)
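

# A quick illustrative check (not part of Django): the methods exercised below
# are pure string manipulation, so they can be called without a database.
# Passing connection=None is safe here only because none of these particular
# methods touch self.connection.
if __name__ == '__main__':
    ops = DatabaseOperations(connection=None)
    # VALUES clause for a 2-row, 2-column bulk INSERT.
    assert ops.bulk_insert_sql([None, None], [['%s', '%s'], ['%s', '%s']]) == \
        'VALUES (%s, %s), (%s, %s)'
    # Case-insensitive text lookups are cast to text and wrapped in UPPER().
    assert ops.lookup_cast('iexact') == 'UPPER(%s::text)'
    # Fixed UTC offsets have their sign flipped, since PostgreSQL's
    # AT TIME ZONE follows the POSIX sign convention.
    assert ops._prepare_tzname_delta('+05:30') == '-05:30'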
import datetime
import json

from psycopg2.extras import DateRange, DateTimeTZRange, NumericRange, Range

from django.contrib.postgres import forms, lookups
from django.db import models

from .utils import AttributeSetter

__all__ = [
    'RangeField', 'IntegerRangeField', 'BigIntegerRangeField',
    'DecimalRangeField', 'DateTimeRangeField', 'DateRangeField',
    'FloatRangeField',
]


class RangeField(models.Field):
    empty_strings_allowed = False

    def __init__(self, *args, **kwargs):
        # Initializing base_field here ensures that its model matches the model for self.
        if hasattr(self, 'base_field'):
            self.base_field = self.base_field()
        super().__init__(*args, **kwargs)

    @property
    def model(self):
        try:
            return self.__dict__['model']
        except KeyError:
            raise AttributeError("'%s' object has no attribute 'model'" % self.__class__.__name__)

    @model.setter
    def model(self, model):
        self.__dict__['model'] = model
        self.base_field.model = model

    def get_prep_value(self, value):
        if value is None:
            return None
        elif isinstance(value, Range):
            return value
        elif isinstance(value, (list, tuple)):
            return self.range_type(value[0], value[1])
        return value

    def to_python(self, value):
        if isinstance(value, str):
            # Assume we're deserializing
            vals = json.loads(value)
            for end in ('lower', 'upper'):
                if end in vals:
                    vals[end] = self.base_field.to_python(vals[end])
            value = self.range_type(**vals)
        elif isinstance(value, (list, tuple)):
            value = self.range_type(value[0], value[1])
        return value

    def set_attributes_from_name(self, name):
        super().set_attributes_from_name(name)
        self.base_field.set_attributes_from_name(name)

    def value_to_string(self, obj):
        value = self.value_from_object(obj)
        if value is None:
            return None
        if value.isempty:
            return json.dumps({"empty": True})
        base_field = self.base_field
        result = {"bounds": value._bounds}
        for end in ('lower', 'upper'):
            val = getattr(value, end)
            if val is None:
                result[end] = None
            else:
                obj = AttributeSetter(base_field.attname, val)
                result[end] = base_field.value_to_string(obj)
        return json.dumps(result)

    def formfield(self, **kwargs):
        kwargs.setdefault('form_class', self.form_field)
        return super().formfield(**kwargs)


class IntegerRangeField(RangeField):
    base_field = models.IntegerField
    range_type = NumericRange
    form_field = forms.IntegerRangeField

    def db_type(self, connection):
        return 'int4range'


class BigIntegerRangeField(RangeField):
    base_field = models.BigIntegerField
    range_type = NumericRange
    form_field = forms.IntegerRangeField

    def db_type(self, connection):
        return 'int8range'


class DecimalRangeField(RangeField):
    base_field = models.DecimalField
    range_type = NumericRange
    form_field = forms.DecimalRangeField

    def db_type(self, connection):
        return 'numrange'


class FloatRangeField(RangeField):
    system_check_deprecated_details = {
        'msg': (
            'FloatRangeField is deprecated and will be removed in Django 3.1.'
        ),
        'hint': 'Use DecimalRangeField instead.',
        'id': 'fields.W902',
    }
    base_field = models.FloatField
    range_type = NumericRange
    form_field = forms.FloatRangeField

    def db_type(self, connection):
        return 'numrange'


class DateTimeRangeField(RangeField):
    base_field = models.DateTimeField
    range_type = DateTimeTZRange
    form_field = forms.DateTimeRangeField

    def db_type(self, connection):
        return 'tstzrange'


class DateRangeField(RangeField):
    base_field = models.DateField
    range_type = DateRange
    form_field = forms.DateRangeField

    def db_type(self, connection):
        return 'daterange'


RangeField.register_lookup(lookups.DataContains)
RangeField.register_lookup(lookups.ContainedBy)
RangeField.register_lookup(lookups.Overlap)


class DateTimeRangeContains(models.Lookup):
    """
    Lookup for Date/DateTimeRange containment to cast the rhs to the correct
    type.
    """
    lookup_name = 'contains'

    def process_rhs(self, compiler, connection):
        # Transform rhs value for db lookup.
        if isinstance(self.rhs, datetime.date):
            output_field = models.DateTimeField() if isinstance(self.rhs, datetime.datetime) else models.DateField()
            value = models.Value(self.rhs, output_field=output_field)
            self.rhs = value.resolve_expression(compiler.query)
        return super().process_rhs(compiler, connection)

    def as_sql(self, compiler, connection):
        lhs, lhs_params = self.process_lhs(compiler, connection)
        rhs, rhs_params = self.process_rhs(compiler, connection)
        params = lhs_params + rhs_params
        # Cast the rhs if needed.
        cast_sql = ''
        if (
            isinstance(self.rhs, models.Expression) and
            self.rhs._output_field_or_none and
            # Skip cast if rhs has a matching range type.
            not isinstance(self.rhs._output_field_or_none, self.lhs.output_field.__class__)
        ):
            cast_internal_type = self.lhs.output_field.base_field.get_internal_type()
            cast_sql = '::{}'.format(connection.data_types.get(cast_internal_type))
        return '%s @> %s%s' % (lhs, rhs, cast_sql), params


DateRangeField.register_lookup(DateTimeRangeContains)
DateTimeRangeField.register_lookup(DateTimeRangeContains)


class RangeContainedBy(models.Lookup):
    lookup_name = 'contained_by'
    type_mapping = {
        'integer': 'int4range',
        'bigint': 'int8range',
        'double precision': 'numrange',
        'date': 'daterange',
        'timestamp with time zone': 'tstzrange',
    }

    def as_sql(self, qn, connection):
        field = self.lhs.output_field
        if isinstance(field, models.FloatField):
            sql = '%s::numeric <@ %s::{}'.format(self.type_mapping[field.db_type(connection)])
        else:
            sql = '%s <@ %s::{}'.format(self.type_mapping[field.db_type(connection)])
        lhs, lhs_params = self.process_lhs(qn, connection)
        rhs, rhs_params = self.process_rhs(qn, connection)
        params = lhs_params + rhs_params
        return sql % (lhs, rhs), params

    def get_prep_lookup(self):
        return RangeField().get_prep_value(self.rhs)


models.DateField.register_lookup(RangeContainedBy)
models.DateTimeField.register_lookup(RangeContainedBy)
models.IntegerField.register_lookup(RangeContainedBy)
models.BigIntegerField.register_lookup(RangeContainedBy)
models.FloatField.register_lookup(RangeContainedBy)


@RangeField.register_lookup
class FullyLessThan(lookups.PostgresSimpleLookup):
    lookup_name = 'fully_lt'
    operator = '<<'


@RangeField.register_lookup
class FullGreaterThan(lookups.PostgresSimpleLookup):
    lookup_name = 'fully_gt'
    operator = '>>'


@RangeField.register_lookup
class NotLessThan(lookups.PostgresSimpleLookup):
    lookup_name = 'not_lt'
    operator = '&>'


@RangeField.register_lookup
class NotGreaterThan(lookups.PostgresSimpleLookup):
    lookup_name = 'not_gt'
    operator = '&<'


@RangeField.register_lookup
class AdjacentToLookup(lookups.PostgresSimpleLookup):
    lookup_name = 'adjacent_to'
    operator = '-|-'


@RangeField.register_lookup
class RangeStartsWith(models.Transform):
    lookup_name = 'startswith'
    function = 'lower'

    @property
    def output_field(self):
        return self.lhs.output_field.base_field


@RangeField.register_lookup
class RangeEndsWith(models.Transform):
    lookup_name = 'endswith'
    function = 'upper'

    @property
    def output_field(self):
        return self.lhs.output_field.base_field


@RangeField.register_lookup
class IsEmpty(models.Transform):
    lookup_name = 'isempty'
    function = 'isempty'
    output_field = models.BooleanField()
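For orientation, here is a minimal sketch of how the lookups and transforms registered above read at the ORM level. It assumes an IntegerRangeField named ints on a model such as the RangesModel defined with the test models below; the PostgreSQL operator or function each call compiles to is noted inline, and the range literals are illustrative.

from psycopg2.extras import NumericRange

# Sketch only: assumes `ints = IntegerRangeField(null=True)` on RangesModel.
RangesModel.objects.filter(ints__contains=15)                        # ints @> 15
RangesModel.objects.filter(ints__contained_by=NumericRange(0, 99))   # ints <@ '[0,99)'
RangesModel.objects.filter(ints__overlap=NumericRange(10, 20))       # ints && '[10,20)'
RangesModel.objects.filter(ints__fully_lt=NumericRange(50, 60))      # ints << '[50,60)'
RangesModel.objects.filter(ints__adjacent_to=NumericRange(20, 30))   # ints -|- '[20,30)'
RangesModel.objects.filter(ints__startswith=10)                      # lower(ints) = 10
RangesModel.objects.filter(ints__isempty=True)                       # isempty(ints)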
bf46637f504f3356aef0842688fce0c47bdeefd010e513d1b4bf6981b60e5860
import decimal import enum import json import unittest import uuid from django import forms from django.core import checks, exceptions, serializers, validators from django.core.exceptions import FieldError from django.core.management import call_command from django.db import IntegrityError, connection, models from django.test import TransactionTestCase, modify_settings, override_settings from django.test.utils import isolate_apps from django.utils import timezone from . import ( PostgreSQLSimpleTestCase, PostgreSQLTestCase, PostgreSQLWidgetTestCase, ) from .models import ( ArrayEnumModel, ArrayFieldSubclass, CharArrayModel, DateTimeArrayModel, IntegerArrayModel, NestedIntegerArrayModel, NullableIntegerArrayModel, OtherTypesArrayModel, PostgreSQLModel, Tag, ) try: from django.contrib.postgres.fields import ArrayField from django.contrib.postgres.forms import ( SimpleArrayField, SplitArrayField, SplitArrayWidget, ) from psycopg2.extras import NumericRange except ImportError: pass class TestSaveLoad(PostgreSQLTestCase): def test_integer(self): instance = IntegerArrayModel(field=[1, 2, 3]) instance.save() loaded = IntegerArrayModel.objects.get() self.assertEqual(instance.field, loaded.field) def test_char(self): instance = CharArrayModel(field=['hello', 'goodbye']) instance.save() loaded = CharArrayModel.objects.get() self.assertEqual(instance.field, loaded.field) def test_dates(self): instance = DateTimeArrayModel( datetimes=[timezone.now()], dates=[timezone.now().date()], times=[timezone.now().time()], ) instance.save() loaded = DateTimeArrayModel.objects.get() self.assertEqual(instance.datetimes, loaded.datetimes) self.assertEqual(instance.dates, loaded.dates) self.assertEqual(instance.times, loaded.times) def test_tuples(self): instance = IntegerArrayModel(field=(1,)) instance.save() loaded = IntegerArrayModel.objects.get() self.assertSequenceEqual(instance.field, loaded.field) def test_integers_passed_as_strings(self): # This checks that get_prep_value is deferred properly instance = IntegerArrayModel(field=['1']) instance.save() loaded = IntegerArrayModel.objects.get() self.assertEqual(loaded.field, [1]) def test_default_null(self): instance = NullableIntegerArrayModel() instance.save() loaded = NullableIntegerArrayModel.objects.get(pk=instance.pk) self.assertIsNone(loaded.field) self.assertEqual(instance.field, loaded.field) def test_null_handling(self): instance = NullableIntegerArrayModel(field=None) instance.save() loaded = NullableIntegerArrayModel.objects.get() self.assertEqual(instance.field, loaded.field) instance = IntegerArrayModel(field=None) with self.assertRaises(IntegrityError): instance.save() def test_nested(self): instance = NestedIntegerArrayModel(field=[[1, 2], [3, 4]]) instance.save() loaded = NestedIntegerArrayModel.objects.get() self.assertEqual(instance.field, loaded.field) def test_other_array_types(self): instance = OtherTypesArrayModel( ips=['192.168.0.1', '::1'], uuids=[uuid.uuid4()], decimals=[decimal.Decimal(1.25), 1.75], tags=[Tag(1), Tag(2), Tag(3)], json=[{'a': 1}, {'b': 2}], int_ranges=[NumericRange(10, 20), NumericRange(30, 40)], bigint_ranges=[ NumericRange(7000000000, 10000000000), NumericRange(50000000000, 70000000000), ] ) instance.save() loaded = OtherTypesArrayModel.objects.get() self.assertEqual(instance.ips, loaded.ips) self.assertEqual(instance.uuids, loaded.uuids) self.assertEqual(instance.decimals, loaded.decimals) self.assertEqual(instance.tags, loaded.tags) self.assertEqual(instance.json, loaded.json) self.assertEqual(instance.int_ranges, 
loaded.int_ranges) self.assertEqual(instance.bigint_ranges, loaded.bigint_ranges) def test_null_from_db_value_handling(self): instance = OtherTypesArrayModel.objects.create( ips=['192.168.0.1', '::1'], uuids=[uuid.uuid4()], decimals=[decimal.Decimal(1.25), 1.75], tags=None, ) instance.refresh_from_db() self.assertIsNone(instance.tags) self.assertEqual(instance.json, []) self.assertIsNone(instance.int_ranges) self.assertIsNone(instance.bigint_ranges) def test_model_set_on_base_field(self): instance = IntegerArrayModel() field = instance._meta.get_field('field') self.assertEqual(field.model, IntegerArrayModel) self.assertEqual(field.base_field.model, IntegerArrayModel) class TestQuerying(PostgreSQLTestCase): @classmethod def setUpTestData(cls): cls.objs = NullableIntegerArrayModel.objects.bulk_create([ NullableIntegerArrayModel(field=[1]), NullableIntegerArrayModel(field=[2]), NullableIntegerArrayModel(field=[2, 3]), NullableIntegerArrayModel(field=[20, 30, 40]), NullableIntegerArrayModel(field=None), ]) def test_empty_list(self): NullableIntegerArrayModel.objects.create(field=[]) obj = NullableIntegerArrayModel.objects.annotate( empty_array=models.Value([], output_field=ArrayField(models.IntegerField())), ).filter(field=models.F('empty_array')).get() self.assertEqual(obj.field, []) self.assertEqual(obj.empty_array, []) def test_exact(self): self.assertSequenceEqual( NullableIntegerArrayModel.objects.filter(field__exact=[1]), self.objs[:1] ) def test_exact_charfield(self): instance = CharArrayModel.objects.create(field=['text']) self.assertSequenceEqual( CharArrayModel.objects.filter(field=['text']), [instance] ) def test_exact_nested(self): instance = NestedIntegerArrayModel.objects.create(field=[[1, 2], [3, 4]]) self.assertSequenceEqual( NestedIntegerArrayModel.objects.filter(field=[[1, 2], [3, 4]]), [instance] ) def test_isnull(self): self.assertSequenceEqual( NullableIntegerArrayModel.objects.filter(field__isnull=True), self.objs[-1:] ) def test_gt(self): self.assertSequenceEqual( NullableIntegerArrayModel.objects.filter(field__gt=[0]), self.objs[:4] ) def test_lt(self): self.assertSequenceEqual( NullableIntegerArrayModel.objects.filter(field__lt=[2]), self.objs[:1] ) def test_in(self): self.assertSequenceEqual( NullableIntegerArrayModel.objects.filter(field__in=[[1], [2]]), self.objs[:2] ) def test_in_subquery(self): IntegerArrayModel.objects.create(field=[2, 3]) self.assertSequenceEqual( NullableIntegerArrayModel.objects.filter( field__in=IntegerArrayModel.objects.all().values_list('field', flat=True) ), self.objs[2:3] ) @unittest.expectedFailure def test_in_including_F_object(self): # This test asserts that Array objects passed to filters can be # constructed to contain F objects. This currently doesn't work as the # psycopg2 mogrify method that generates the ARRAY() syntax is # expecting literals, not column references (#27095). self.assertSequenceEqual( NullableIntegerArrayModel.objects.filter(field__in=[[models.F('id')]]), self.objs[:2] ) def test_in_as_F_object(self): self.assertSequenceEqual( NullableIntegerArrayModel.objects.filter(field__in=[models.F('field')]), self.objs[:4] ) def test_contained_by(self): self.assertSequenceEqual( NullableIntegerArrayModel.objects.filter(field__contained_by=[1, 2]), self.objs[:2] ) @unittest.expectedFailure def test_contained_by_including_F_object(self): # This test asserts that Array objects passed to filters can be # constructed to contain F objects. 
This currently doesn't work as the # psycopg2 mogrify method that generates the ARRAY() syntax is # expecting literals, not column references (#27095). self.assertSequenceEqual( NullableIntegerArrayModel.objects.filter(field__contained_by=[models.F('id'), 2]), self.objs[:2] ) def test_contains(self): self.assertSequenceEqual( NullableIntegerArrayModel.objects.filter(field__contains=[2]), self.objs[1:3] ) def test_icontains(self): # Using the __icontains lookup with ArrayField is inefficient. instance = CharArrayModel.objects.create(field=['FoO']) self.assertSequenceEqual( CharArrayModel.objects.filter(field__icontains='foo'), [instance] ) def test_contains_charfield(self): # Regression for #22907 self.assertSequenceEqual( CharArrayModel.objects.filter(field__contains=['text']), [] ) def test_contained_by_charfield(self): self.assertSequenceEqual( CharArrayModel.objects.filter(field__contained_by=['text']), [] ) def test_overlap_charfield(self): self.assertSequenceEqual( CharArrayModel.objects.filter(field__overlap=['text']), [] ) def test_index(self): self.assertSequenceEqual( NullableIntegerArrayModel.objects.filter(field__0=2), self.objs[1:3] ) def test_index_chained(self): self.assertSequenceEqual( NullableIntegerArrayModel.objects.filter(field__0__lt=3), self.objs[0:3] ) def test_index_nested(self): instance = NestedIntegerArrayModel.objects.create(field=[[1, 2], [3, 4]]) self.assertSequenceEqual( NestedIntegerArrayModel.objects.filter(field__0__0=1), [instance] ) @unittest.expectedFailure def test_index_used_on_nested_data(self): instance = NestedIntegerArrayModel.objects.create(field=[[1, 2], [3, 4]]) self.assertSequenceEqual( NestedIntegerArrayModel.objects.filter(field__0=[1, 2]), [instance] ) def test_overlap(self): self.assertSequenceEqual( NullableIntegerArrayModel.objects.filter(field__overlap=[1, 2]), self.objs[0:3] ) def test_len(self): self.assertSequenceEqual( NullableIntegerArrayModel.objects.filter(field__len__lte=2), self.objs[0:3] ) def test_len_empty_array(self): obj = NullableIntegerArrayModel.objects.create(field=[]) self.assertSequenceEqual( NullableIntegerArrayModel.objects.filter(field__len=0), [obj] ) def test_slice(self): self.assertSequenceEqual( NullableIntegerArrayModel.objects.filter(field__0_1=[2]), self.objs[1:3] ) self.assertSequenceEqual( NullableIntegerArrayModel.objects.filter(field__0_2=[2, 3]), self.objs[2:3] ) def test_order_by_slice(self): more_objs = ( NullableIntegerArrayModel.objects.create(field=[1, 637]), NullableIntegerArrayModel.objects.create(field=[2, 1]), NullableIntegerArrayModel.objects.create(field=[3, -98123]), NullableIntegerArrayModel.objects.create(field=[4, 2]), ) self.assertSequenceEqual( NullableIntegerArrayModel.objects.order_by('field__1'), [ more_objs[2], more_objs[1], more_objs[3], self.objs[2], self.objs[3], more_objs[0], self.objs[4], self.objs[1], self.objs[0], ] ) @unittest.expectedFailure def test_slice_nested(self): instance = NestedIntegerArrayModel.objects.create(field=[[1, 2], [3, 4]]) self.assertSequenceEqual( NestedIntegerArrayModel.objects.filter(field__0__0_1=[1]), [instance] ) def test_usage_in_subquery(self): self.assertSequenceEqual( NullableIntegerArrayModel.objects.filter( id__in=NullableIntegerArrayModel.objects.filter(field__len=3) ), [self.objs[3]] ) def test_enum_lookup(self): class TestEnum(enum.Enum): VALUE_1 = 'value_1' instance = ArrayEnumModel.objects.create(array_of_enums=[TestEnum.VALUE_1]) self.assertSequenceEqual( ArrayEnumModel.objects.filter(array_of_enums__contains=[TestEnum.VALUE_1]), 
[instance] ) def test_unsupported_lookup(self): msg = "Unsupported lookup '0_bar' for ArrayField or join on the field not permitted." with self.assertRaisesMessage(FieldError, msg): list(NullableIntegerArrayModel.objects.filter(field__0_bar=[2])) msg = "Unsupported lookup '0bar' for ArrayField or join on the field not permitted." with self.assertRaisesMessage(FieldError, msg): list(NullableIntegerArrayModel.objects.filter(field__0bar=[2])) def test_grouping_by_annotations_with_array_field_param(self): value = models.Value([1], output_field=ArrayField(models.IntegerField())) self.assertEqual( NullableIntegerArrayModel.objects.annotate( array_length=models.Func(value, 1, function='ARRAY_LENGTH'), ).values('array_length').annotate( count=models.Count('pk'), ).get()['array_length'], 1, ) class TestDateTimeExactQuerying(PostgreSQLTestCase): @classmethod def setUpTestData(cls): now = timezone.now() cls.datetimes = [now] cls.dates = [now.date()] cls.times = [now.time()] cls.objs = [ DateTimeArrayModel.objects.create(datetimes=cls.datetimes, dates=cls.dates, times=cls.times), ] def test_exact_datetimes(self): self.assertSequenceEqual( DateTimeArrayModel.objects.filter(datetimes=self.datetimes), self.objs ) def test_exact_dates(self): self.assertSequenceEqual( DateTimeArrayModel.objects.filter(dates=self.dates), self.objs ) def test_exact_times(self): self.assertSequenceEqual( DateTimeArrayModel.objects.filter(times=self.times), self.objs ) class TestOtherTypesExactQuerying(PostgreSQLTestCase): @classmethod def setUpTestData(cls): cls.ips = ['192.168.0.1', '::1'] cls.uuids = [uuid.uuid4()] cls.decimals = [decimal.Decimal(1.25), 1.75] cls.tags = [Tag(1), Tag(2), Tag(3)] cls.objs = [ OtherTypesArrayModel.objects.create( ips=cls.ips, uuids=cls.uuids, decimals=cls.decimals, tags=cls.tags, ) ] def test_exact_ip_addresses(self): self.assertSequenceEqual( OtherTypesArrayModel.objects.filter(ips=self.ips), self.objs ) def test_exact_uuids(self): self.assertSequenceEqual( OtherTypesArrayModel.objects.filter(uuids=self.uuids), self.objs ) def test_exact_decimals(self): self.assertSequenceEqual( OtherTypesArrayModel.objects.filter(decimals=self.decimals), self.objs ) def test_exact_tags(self): self.assertSequenceEqual( OtherTypesArrayModel.objects.filter(tags=self.tags), self.objs ) @isolate_apps('postgres_tests') class TestChecks(PostgreSQLSimpleTestCase): def test_field_checks(self): class MyModel(PostgreSQLModel): field = ArrayField(models.CharField()) model = MyModel() errors = model.check() self.assertEqual(len(errors), 1) # The inner CharField is missing a max_length. self.assertEqual(errors[0].id, 'postgres.E001') self.assertIn('max_length', errors[0].msg) def test_invalid_base_fields(self): class MyModel(PostgreSQLModel): field = ArrayField(models.ManyToManyField('postgres_tests.IntegerArrayModel')) model = MyModel() errors = model.check() self.assertEqual(len(errors), 1) self.assertEqual(errors[0].id, 'postgres.E002') def test_invalid_default(self): class MyModel(PostgreSQLModel): field = ArrayField(models.IntegerField(), default=[]) model = MyModel() self.assertEqual(model.check(), [ checks.Warning( msg=( "ArrayField default should be a callable instead of an " "instance so that it's not shared between all field " "instances." 
), hint='Use a callable instead, e.g., use `list` instead of `[]`.', obj=MyModel._meta.get_field('field'), id='postgres.E003', ) ]) def test_valid_default(self): class MyModel(PostgreSQLModel): field = ArrayField(models.IntegerField(), default=list) model = MyModel() self.assertEqual(model.check(), []) def test_valid_default_none(self): class MyModel(PostgreSQLModel): field = ArrayField(models.IntegerField(), default=None) model = MyModel() self.assertEqual(model.check(), []) def test_nested_field_checks(self): """ Nested ArrayFields are permitted. """ class MyModel(PostgreSQLModel): field = ArrayField(ArrayField(models.CharField())) model = MyModel() errors = model.check() self.assertEqual(len(errors), 1) # The inner CharField is missing a max_length. self.assertEqual(errors[0].id, 'postgres.E001') self.assertIn('max_length', errors[0].msg) @unittest.skipUnless(connection.vendor == 'postgresql', "PostgreSQL specific tests") class TestMigrations(TransactionTestCase): available_apps = ['postgres_tests'] def test_deconstruct(self): field = ArrayField(models.IntegerField()) name, path, args, kwargs = field.deconstruct() new = ArrayField(*args, **kwargs) self.assertEqual(type(new.base_field), type(field.base_field)) self.assertIsNot(new.base_field, field.base_field) def test_deconstruct_with_size(self): field = ArrayField(models.IntegerField(), size=3) name, path, args, kwargs = field.deconstruct() new = ArrayField(*args, **kwargs) self.assertEqual(new.size, field.size) def test_deconstruct_args(self): field = ArrayField(models.CharField(max_length=20)) name, path, args, kwargs = field.deconstruct() new = ArrayField(*args, **kwargs) self.assertEqual(new.base_field.max_length, field.base_field.max_length) def test_subclass_deconstruct(self): field = ArrayField(models.IntegerField()) name, path, args, kwargs = field.deconstruct() self.assertEqual(path, 'django.contrib.postgres.fields.ArrayField') field = ArrayFieldSubclass() name, path, args, kwargs = field.deconstruct() self.assertEqual(path, 'postgres_tests.models.ArrayFieldSubclass') @override_settings(MIGRATION_MODULES={ "postgres_tests": "postgres_tests.array_default_migrations", }) def test_adding_field_with_default(self): # See #22962 table_name = 'postgres_tests_integerarraydefaultmodel' with connection.cursor() as cursor: self.assertNotIn(table_name, connection.introspection.table_names(cursor)) call_command('migrate', 'postgres_tests', verbosity=0) with connection.cursor() as cursor: self.assertIn(table_name, connection.introspection.table_names(cursor)) call_command('migrate', 'postgres_tests', 'zero', verbosity=0) with connection.cursor() as cursor: self.assertNotIn(table_name, connection.introspection.table_names(cursor)) @override_settings(MIGRATION_MODULES={ "postgres_tests": "postgres_tests.array_index_migrations", }) def test_adding_arrayfield_with_index(self): """ ArrayField shouldn't have varchar_patterns_ops or text_patterns_ops indexes. """ table_name = 'postgres_tests_chartextarrayindexmodel' call_command('migrate', 'postgres_tests', verbosity=0) with connection.cursor() as cursor: like_constraint_columns_list = [ v['columns'] for k, v in list(connection.introspection.get_constraints(cursor, table_name).items()) if k.endswith('_like') ] # Only the CharField should have a LIKE index. self.assertEqual(like_constraint_columns_list, [['char2']]) # All fields should have regular indexes. 
with connection.cursor() as cursor: indexes = [ c['columns'][0] for c in connection.introspection.get_constraints(cursor, table_name).values() if c['index'] and len(c['columns']) == 1 ] self.assertIn('char', indexes) self.assertIn('char2', indexes) self.assertIn('text', indexes) call_command('migrate', 'postgres_tests', 'zero', verbosity=0) with connection.cursor() as cursor: self.assertNotIn(table_name, connection.introspection.table_names(cursor)) class TestSerialization(PostgreSQLSimpleTestCase): test_data = ( '[{"fields": {"field": "[\\"1\\", \\"2\\", null]"}, "model": "postgres_tests.integerarraymodel", "pk": null}]' ) def test_dumping(self): instance = IntegerArrayModel(field=[1, 2, None]) data = serializers.serialize('json', [instance]) self.assertEqual(json.loads(data), json.loads(self.test_data)) def test_loading(self): instance = list(serializers.deserialize('json', self.test_data))[0].object self.assertEqual(instance.field, [1, 2, None]) class TestValidation(PostgreSQLSimpleTestCase): def test_unbounded(self): field = ArrayField(models.IntegerField()) with self.assertRaises(exceptions.ValidationError) as cm: field.clean([1, None], None) self.assertEqual(cm.exception.code, 'item_invalid') self.assertEqual( cm.exception.message % cm.exception.params, 'Item 2 in the array did not validate: This field cannot be null.' ) def test_blank_true(self): field = ArrayField(models.IntegerField(blank=True, null=True)) # This should not raise a validation error field.clean([1, None], None) def test_with_size(self): field = ArrayField(models.IntegerField(), size=3) field.clean([1, 2, 3], None) with self.assertRaises(exceptions.ValidationError) as cm: field.clean([1, 2, 3, 4], None) self.assertEqual(cm.exception.messages[0], 'List contains 4 items, it should contain no more than 3.') def test_nested_array_mismatch(self): field = ArrayField(ArrayField(models.IntegerField())) field.clean([[1, 2], [3, 4]], None) with self.assertRaises(exceptions.ValidationError) as cm: field.clean([[1, 2], [3, 4, 5]], None) self.assertEqual(cm.exception.code, 'nested_array_mismatch') self.assertEqual(cm.exception.messages[0], 'Nested arrays must have the same length.') def test_with_base_field_error_params(self): field = ArrayField(models.CharField(max_length=2)) with self.assertRaises(exceptions.ValidationError) as cm: field.clean(['abc'], None) self.assertEqual(len(cm.exception.error_list), 1) exception = cm.exception.error_list[0] self.assertEqual( exception.message, 'Item 1 in the array did not validate: Ensure this value has at most 2 characters (it has 3).' ) self.assertEqual(exception.code, 'item_invalid') self.assertEqual(exception.params, {'nth': 1, 'value': 'abc', 'limit_value': 2, 'show_value': 3}) def test_with_validators(self): field = ArrayField(models.IntegerField(validators=[validators.MinValueValidator(1)])) field.clean([1, 2], None) with self.assertRaises(exceptions.ValidationError) as cm: field.clean([0], None) self.assertEqual(len(cm.exception.error_list), 1) exception = cm.exception.error_list[0] self.assertEqual( exception.message, 'Item 1 in the array did not validate: Ensure this value is greater than or equal to 1.' 
) self.assertEqual(exception.code, 'item_invalid') self.assertEqual(exception.params, {'nth': 1, 'value': 0, 'limit_value': 1, 'show_value': 0}) class TestSimpleFormField(PostgreSQLSimpleTestCase): def test_valid(self): field = SimpleArrayField(forms.CharField()) value = field.clean('a,b,c') self.assertEqual(value, ['a', 'b', 'c']) def test_to_python_fail(self): field = SimpleArrayField(forms.IntegerField()) with self.assertRaises(exceptions.ValidationError) as cm: field.clean('a,b,9') self.assertEqual(cm.exception.messages[0], 'Item 1 in the array did not validate: Enter a whole number.') def test_validate_fail(self): field = SimpleArrayField(forms.CharField(required=True)) with self.assertRaises(exceptions.ValidationError) as cm: field.clean('a,b,') self.assertEqual(cm.exception.messages[0], 'Item 3 in the array did not validate: This field is required.') def test_validate_fail_base_field_error_params(self): field = SimpleArrayField(forms.CharField(max_length=2)) with self.assertRaises(exceptions.ValidationError) as cm: field.clean('abc,c,defg') errors = cm.exception.error_list self.assertEqual(len(errors), 2) first_error = errors[0] self.assertEqual( first_error.message, 'Item 1 in the array did not validate: Ensure this value has at most 2 characters (it has 3).' ) self.assertEqual(first_error.code, 'item_invalid') self.assertEqual(first_error.params, {'nth': 1, 'value': 'abc', 'limit_value': 2, 'show_value': 3}) second_error = errors[1] self.assertEqual( second_error.message, 'Item 3 in the array did not validate: Ensure this value has at most 2 characters (it has 4).' ) self.assertEqual(second_error.code, 'item_invalid') self.assertEqual(second_error.params, {'nth': 3, 'value': 'defg', 'limit_value': 2, 'show_value': 4}) def test_validators_fail(self): field = SimpleArrayField(forms.RegexField('[a-e]{2}')) with self.assertRaises(exceptions.ValidationError) as cm: field.clean('a,bc,de') self.assertEqual(cm.exception.messages[0], 'Item 1 in the array did not validate: Enter a valid value.') def test_delimiter(self): field = SimpleArrayField(forms.CharField(), delimiter='|') value = field.clean('a|b|c') self.assertEqual(value, ['a', 'b', 'c']) def test_delimiter_with_nesting(self): field = SimpleArrayField(SimpleArrayField(forms.CharField()), delimiter='|') value = field.clean('a,b|c,d') self.assertEqual(value, [['a', 'b'], ['c', 'd']]) def test_prepare_value(self): field = SimpleArrayField(forms.CharField()) value = field.prepare_value(['a', 'b', 'c']) self.assertEqual(value, 'a,b,c') def test_max_length(self): field = SimpleArrayField(forms.CharField(), max_length=2) with self.assertRaises(exceptions.ValidationError) as cm: field.clean('a,b,c') self.assertEqual(cm.exception.messages[0], 'List contains 3 items, it should contain no more than 2.') def test_min_length(self): field = SimpleArrayField(forms.CharField(), min_length=4) with self.assertRaises(exceptions.ValidationError) as cm: field.clean('a,b,c') self.assertEqual(cm.exception.messages[0], 'List contains 3 items, it should contain no fewer than 4.') def test_required(self): field = SimpleArrayField(forms.CharField(), required=True) with self.assertRaises(exceptions.ValidationError) as cm: field.clean('') self.assertEqual(cm.exception.messages[0], 'This field is required.') def test_model_field_formfield(self): model_field = ArrayField(models.CharField(max_length=27)) form_field = model_field.formfield() self.assertIsInstance(form_field, SimpleArrayField) self.assertIsInstance(form_field.base_field, forms.CharField) 
self.assertEqual(form_field.base_field.max_length, 27) def test_model_field_formfield_size(self): model_field = ArrayField(models.CharField(max_length=27), size=4) form_field = model_field.formfield() self.assertIsInstance(form_field, SimpleArrayField) self.assertEqual(form_field.max_length, 4) def test_model_field_choices(self): model_field = ArrayField(models.IntegerField(choices=((1, 'A'), (2, 'B')))) form_field = model_field.formfield() self.assertEqual(form_field.clean('1,2'), [1, 2]) def test_already_converted_value(self): field = SimpleArrayField(forms.CharField()) vals = ['a', 'b', 'c'] self.assertEqual(field.clean(vals), vals) def test_has_changed(self): field = SimpleArrayField(forms.IntegerField()) self.assertIs(field.has_changed([1, 2], [1, 2]), False) self.assertIs(field.has_changed([1, 2], '1,2'), False) self.assertIs(field.has_changed([1, 2], '1,2,3'), True) self.assertIs(field.has_changed([1, 2], 'a,b'), True) def test_has_changed_empty(self): field = SimpleArrayField(forms.CharField()) self.assertIs(field.has_changed(None, None), False) self.assertIs(field.has_changed(None, ''), False) self.assertIs(field.has_changed(None, []), False) self.assertIs(field.has_changed([], None), False) self.assertIs(field.has_changed([], ''), False) class TestSplitFormField(PostgreSQLSimpleTestCase): def test_valid(self): class SplitForm(forms.Form): array = SplitArrayField(forms.CharField(), size=3) data = {'array_0': 'a', 'array_1': 'b', 'array_2': 'c'} form = SplitForm(data) self.assertTrue(form.is_valid()) self.assertEqual(form.cleaned_data, {'array': ['a', 'b', 'c']}) def test_required(self): class SplitForm(forms.Form): array = SplitArrayField(forms.CharField(), required=True, size=3) data = {'array_0': '', 'array_1': '', 'array_2': ''} form = SplitForm(data) self.assertFalse(form.is_valid()) self.assertEqual(form.errors, {'array': ['This field is required.']}) def test_remove_trailing_nulls(self): class SplitForm(forms.Form): array = SplitArrayField(forms.CharField(required=False), size=5, remove_trailing_nulls=True) data = {'array_0': 'a', 'array_1': '', 'array_2': 'b', 'array_3': '', 'array_4': ''} form = SplitForm(data) self.assertTrue(form.is_valid(), form.errors) self.assertEqual(form.cleaned_data, {'array': ['a', '', 'b']}) def test_remove_trailing_nulls_not_required(self): class SplitForm(forms.Form): array = SplitArrayField( forms.CharField(required=False), size=2, remove_trailing_nulls=True, required=False, ) data = {'array_0': '', 'array_1': ''} form = SplitForm(data) self.assertTrue(form.is_valid()) self.assertEqual(form.cleaned_data, {'array': []}) def test_required_field(self): class SplitForm(forms.Form): array = SplitArrayField(forms.CharField(), size=3) data = {'array_0': 'a', 'array_1': 'b', 'array_2': ''} form = SplitForm(data) self.assertFalse(form.is_valid()) self.assertEqual(form.errors, {'array': ['Item 3 in the array did not validate: This field is required.']}) def test_invalid_integer(self): msg = 'Item 2 in the array did not validate: Ensure this value is less than or equal to 100.' with self.assertRaisesMessage(exceptions.ValidationError, msg): SplitArrayField(forms.IntegerField(max_value=100), size=2).clean([0, 101]) # To locate the widget's template. 
@modify_settings(INSTALLED_APPS={'append': 'django.contrib.postgres'}) def test_rendering(self): class SplitForm(forms.Form): array = SplitArrayField(forms.CharField(), size=3) self.assertHTMLEqual(str(SplitForm()), ''' <tr> <th><label for="id_array_0">Array:</label></th> <td> <input id="id_array_0" name="array_0" type="text" required> <input id="id_array_1" name="array_1" type="text" required> <input id="id_array_2" name="array_2" type="text" required> </td> </tr> ''') def test_invalid_char_length(self): field = SplitArrayField(forms.CharField(max_length=2), size=3) with self.assertRaises(exceptions.ValidationError) as cm: field.clean(['abc', 'c', 'defg']) self.assertEqual(cm.exception.messages, [ 'Item 1 in the array did not validate: Ensure this value has at most 2 characters (it has 3).', 'Item 3 in the array did not validate: Ensure this value has at most 2 characters (it has 4).', ]) def test_splitarraywidget_value_omitted_from_data(self): class Form(forms.ModelForm): field = SplitArrayField(forms.IntegerField(), required=False, size=2) class Meta: model = IntegerArrayModel fields = ('field',) form = Form({'field_0': '1', 'field_1': '2'}) self.assertEqual(form.errors, {}) obj = form.save(commit=False) self.assertEqual(obj.field, [1, 2]) def test_splitarrayfield_has_changed(self): class Form(forms.ModelForm): field = SplitArrayField(forms.IntegerField(), required=False, size=2) class Meta: model = IntegerArrayModel fields = ('field',) obj = IntegerArrayModel(field=[1, 2]) form = Form({'field_0': '1', 'field_1': '2'}, instance=obj) self.assertFalse(form.has_changed()) class TestSplitFormWidget(PostgreSQLWidgetTestCase): def test_get_context(self): self.assertEqual( SplitArrayWidget(forms.TextInput(), size=2).get_context('name', ['val1', 'val2']), { 'widget': { 'name': 'name', 'is_hidden': False, 'required': False, 'value': "['val1', 'val2']", 'attrs': {}, 'template_name': 'postgres/widgets/split_array.html', 'subwidgets': [ { 'name': 'name_0', 'is_hidden': False, 'required': False, 'value': 'val1', 'attrs': {}, 'template_name': 'django/forms/widgets/text.html', 'type': 'text', }, { 'name': 'name_1', 'is_hidden': False, 'required': False, 'value': 'val2', 'attrs': {}, 'template_name': 'django/forms/widgets/text.html', 'type': 'text', }, ] } } ) def test_render(self): self.check_html( SplitArrayWidget(forms.TextInput(), size=2), 'array', None, """ <input name="array_0" type="text"> <input name="array_1" type="text"> """ ) def test_render_attrs(self): self.check_html( SplitArrayWidget(forms.TextInput(), size=2), 'array', ['val1', 'val2'], attrs={'id': 'foo'}, html=( """ <input id="foo_0" name="array_0" type="text" value="val1"> <input id="foo_1" name="array_1" type="text" value="val2"> """ ) ) def test_value_omitted_from_data(self): widget = SplitArrayWidget(forms.TextInput(), size=2) self.assertIs(widget.value_omitted_from_data({}, {}, 'field'), True) self.assertIs(widget.value_omitted_from_data({'field_0': 'value'}, {}, 'field'), False) self.assertIs(widget.value_omitted_from_data({'field_1': 'value'}, {}, 'field'), False) self.assertIs(widget.value_omitted_from_data({'field_0': 'value', 'field_1': 'value'}, {}, 'field'), False)
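For readers tracing TestQuerying above: the ArrayField lookups exercised there map to SQL roughly as sketched below. The mapping is inferred from the observable test behavior rather than quoted from the compiler; exact aliasing and parameter binding vary, and the ORM's 0-based indexes become PostgreSQL's 1-based subscripts.

#   field__contains=[2]        ->  "field" @> ARRAY[2]
#   field__contained_by=[1, 2] ->  "field" <@ ARRAY[1, 2]
#   field__overlap=[1, 2]      ->  "field" && ARRAY[1, 2]
#   field__len=0               ->  the array length of "field" compared to 0
#   field__0=2                 ->  "field"[1] = 2
#   field__0_1=[2]             ->  "field"[1:1] = ARRAY[2]
# One way to check the generated SQL locally:
print(NullableIntegerArrayModel.objects.filter(field__contains=[2]).query)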
from django.core.serializers.json import DjangoJSONEncoder
from django.db import models

from .fields import (
    ArrayField, BigIntegerRangeField, CICharField, CIEmailField, CITextField,
    DateRangeField, DateTimeRangeField, DecimalRangeField, EnumField,
    HStoreField, IntegerRangeField, JSONField, SearchVectorField,
)


class Tag:
    def __init__(self, tag_id):
        self.tag_id = tag_id

    def __eq__(self, other):
        return isinstance(other, Tag) and self.tag_id == other.tag_id


class TagField(models.SmallIntegerField):

    def from_db_value(self, value, expression, connection):
        if value is None:
            return value
        return Tag(int(value))

    def to_python(self, value):
        if isinstance(value, Tag):
            return value
        if value is None:
            return value
        return Tag(int(value))

    def get_prep_value(self, value):
        return value.tag_id


class PostgreSQLModel(models.Model):
    class Meta:
        abstract = True
        required_db_vendor = 'postgresql'


class IntegerArrayModel(PostgreSQLModel):
    field = ArrayField(models.IntegerField(), default=list, blank=True)


class NullableIntegerArrayModel(PostgreSQLModel):
    field = ArrayField(models.IntegerField(), blank=True, null=True)


class CharArrayModel(PostgreSQLModel):
    field = ArrayField(models.CharField(max_length=10))


class DateTimeArrayModel(PostgreSQLModel):
    datetimes = ArrayField(models.DateTimeField())
    dates = ArrayField(models.DateField())
    times = ArrayField(models.TimeField())


class NestedIntegerArrayModel(PostgreSQLModel):
    field = ArrayField(ArrayField(models.IntegerField()))


class OtherTypesArrayModel(PostgreSQLModel):
    ips = ArrayField(models.GenericIPAddressField(), default=list)
    uuids = ArrayField(models.UUIDField(), default=list)
    decimals = ArrayField(models.DecimalField(max_digits=5, decimal_places=2), default=list)
    tags = ArrayField(TagField(), blank=True, null=True)
    json = ArrayField(JSONField(default=dict), default=list)
    int_ranges = ArrayField(IntegerRangeField(), blank=True, null=True)
    bigint_ranges = ArrayField(BigIntegerRangeField(), blank=True, null=True)


class HStoreModel(PostgreSQLModel):
    field = HStoreField(blank=True, null=True)
    array_field = ArrayField(HStoreField(), null=True)


class ArrayEnumModel(PostgreSQLModel):
    array_of_enums = ArrayField(EnumField(max_length=20))


class CharFieldModel(models.Model):
    field = models.CharField(max_length=16)


class TextFieldModel(models.Model):
    field = models.TextField()

    def __str__(self):
        return self.field


# Scene/Character/Line models are used to test full text search. They're
# populated with content from Monty Python and the Holy Grail.
class Scene(models.Model):
    scene = models.CharField(max_length=255)
    setting = models.CharField(max_length=255)

    def __str__(self):
        return self.scene


class Character(models.Model):
    name = models.CharField(max_length=255)

    def __str__(self):
        return self.name


class CITestModel(PostgreSQLModel):
    name = CICharField(primary_key=True, max_length=255)
    email = CIEmailField()
    description = CITextField()
    array_field = ArrayField(CITextField(), null=True)

    def __str__(self):
        return self.name


class Line(PostgreSQLModel):
    scene = models.ForeignKey('Scene', models.CASCADE)
    character = models.ForeignKey('Character', models.CASCADE)
    dialogue = models.TextField(blank=True, null=True)
    dialogue_search_vector = SearchVectorField(blank=True, null=True)
    dialogue_config = models.CharField(max_length=100, blank=True, null=True)

    def __str__(self):
        return self.dialogue or ''


class RangesModel(PostgreSQLModel):
    ints = IntegerRangeField(blank=True, null=True)
    bigints = BigIntegerRangeField(blank=True, null=True)
    decimals = DecimalRangeField(blank=True, null=True)
    timestamps = DateTimeRangeField(blank=True, null=True)
    timestamps_inner = DateTimeRangeField(blank=True, null=True)
    dates = DateRangeField(blank=True, null=True)
    dates_inner = DateRangeField(blank=True, null=True)


class RangeLookupsModel(PostgreSQLModel):
    parent = models.ForeignKey(RangesModel, models.SET_NULL, blank=True, null=True)
    integer = models.IntegerField(blank=True, null=True)
    big_integer = models.BigIntegerField(blank=True, null=True)
    float = models.FloatField(blank=True, null=True)
    timestamp = models.DateTimeField(blank=True, null=True)
    date = models.DateField(blank=True, null=True)


class JSONModel(PostgreSQLModel):
    field = JSONField(blank=True, null=True)
    field_custom = JSONField(blank=True, null=True, encoder=DjangoJSONEncoder)


class ArrayFieldSubclass(ArrayField):
    def __init__(self, *args, **kwargs):
        super().__init__(models.IntegerField())


class AggregateTestModel(models.Model):
    """
    To test postgres-specific general aggregation functions
    """
    char_field = models.CharField(max_length=30, blank=True)
    integer_field = models.IntegerField(null=True)
    boolean_field = models.BooleanField(null=True)


class StatTestModel(models.Model):
    """
    To test postgres-specific aggregation functions for statistics
    """
    int1 = models.IntegerField()
    int2 = models.IntegerField()
    related_field = models.ForeignKey(AggregateTestModel, models.SET_NULL, null=True)


class NowTestModel(models.Model):
    when = models.DateTimeField(null=True, default=None)


class UUIDTestModel(models.Model):
    uuid = models.UUIDField(default=None, null=True)
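TagField above is the minimal custom-field pattern: get_prep_value() turns a Tag into its integer id on the way into the database, while from_db_value() and to_python() rebuild Tag instances on the way out. A usage sketch with illustrative values (the required array fields are given empty lists here just to satisfy the model):

obj = OtherTypesArrayModel.objects.create(
    ips=['10.0.0.1'],
    uuids=[],
    decimals=[],
    tags=[Tag(1), Tag(2)],  # stored as the integer array {1, 2}
)
obj.refresh_from_db()
assert obj.tags == [Tag(1), Tag(2)]  # Tag.__eq__ compares tag_id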
import datetime

from django.db import connection, transaction
from django.db.models import F, Q
from django.db.models.constraints import CheckConstraint
from django.db.utils import IntegrityError

from . import PostgreSQLTestCase
from .models import RangesModel

try:
    from psycopg2.extras import NumericRange
except ImportError:
    pass


class SchemaTests(PostgreSQLTestCase):
    def get_constraints(self, table):
        """Get the constraints on the table using a new cursor."""
        with connection.cursor() as cursor:
            return connection.introspection.get_constraints(cursor, table)

    def test_check_constraint_range_value(self):
        constraint_name = 'ints_between'
        self.assertNotIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
        constraint = CheckConstraint(
            check=Q(ints__contained_by=NumericRange(10, 30)),
            name=constraint_name,
        )
        with connection.schema_editor() as editor:
            editor.add_constraint(RangesModel, constraint)
        with connection.cursor() as cursor:
            constraints = connection.introspection.get_constraints(cursor, RangesModel._meta.db_table)
        self.assertIn(constraint_name, constraints)
        with self.assertRaises(IntegrityError), transaction.atomic():
            RangesModel.objects.create(ints=(20, 50))
        RangesModel.objects.create(ints=(10, 30))

    def test_check_constraint_daterange_contains(self):
        constraint_name = 'dates_contains'
        self.assertNotIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
        constraint = CheckConstraint(
            check=Q(dates__contains=F('dates_inner')),
            name=constraint_name,
        )
        with connection.schema_editor() as editor:
            editor.add_constraint(RangesModel, constraint)
        with connection.cursor() as cursor:
            constraints = connection.introspection.get_constraints(cursor, RangesModel._meta.db_table)
        self.assertIn(constraint_name, constraints)
        date_1 = datetime.date(2016, 1, 1)
        date_2 = datetime.date(2016, 1, 4)
        with self.assertRaises(IntegrityError), transaction.atomic():
            RangesModel.objects.create(
                dates=(date_1, date_2),
                dates_inner=(date_1, date_2.replace(day=5)),
            )
        RangesModel.objects.create(
            dates=(date_1, date_2),
            dates_inner=(date_1, date_2),
        )

    def test_check_constraint_datetimerange_contains(self):
        constraint_name = 'timestamps_contains'
        self.assertNotIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
        constraint = CheckConstraint(
            check=Q(timestamps__contains=F('timestamps_inner')),
            name=constraint_name,
        )
        with connection.schema_editor() as editor:
            editor.add_constraint(RangesModel, constraint)
        with connection.cursor() as cursor:
            constraints = connection.introspection.get_constraints(cursor, RangesModel._meta.db_table)
        self.assertIn(constraint_name, constraints)
        datetime_1 = datetime.datetime(2016, 1, 1)
        datetime_2 = datetime.datetime(2016, 1, 2, 12)
        with self.assertRaises(IntegrityError), transaction.atomic():
            RangesModel.objects.create(
                timestamps=(datetime_1, datetime_2),
                timestamps_inner=(datetime_1, datetime_2.replace(hour=13)),
            )
        RangesModel.objects.create(
            timestamps=(datetime_1, datetime_2),
            timestamps_inner=(datetime_1, datetime_2),
        )
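The CheckConstraint objects in SchemaTests compile down to ordinary CHECK clauses built from the range lookups defined in ranges.py above. One way to inspect the generated fragment, reusing the same imports as the tests (a sketch; the exact rendering is backend- and version-specific):

constraint = CheckConstraint(
    check=Q(ints__contained_by=NumericRange(10, 30)),
    name='ints_between',
)
with connection.schema_editor() as editor:
    # Roughly: CHECK ("ints" <@ '[10, 30)'::int4range)
    print(constraint.constraint_sql(RangesModel, editor))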
import datetime import pickle import sys import unittest from operator import attrgetter from django.core.exceptions import EmptyResultSet, FieldError from django.db import DEFAULT_DB_ALIAS, connection from django.db.models import Count, F, Q from django.db.models.sql.constants import LOUTER from django.db.models.sql.where import NothingNode, WhereNode from django.test import SimpleTestCase, TestCase, skipUnlessDBFeature from django.test.utils import CaptureQueriesContext from .models import ( FK1, Annotation, Article, Author, BaseA, Book, CategoryItem, CategoryRelationship, Celebrity, Channel, Chapter, Child, ChildObjectA, Classroom, CommonMixedCaseForeignKeys, Company, Cover, CustomPk, CustomPkTag, DateTimePK, Detail, DumbCategory, Eaten, Employment, ExtraInfo, Fan, Food, Identifier, Individual, Item, Job, JobResponsibilities, Join, LeafA, LeafB, LoopX, LoopZ, ManagedModel, Member, MixedCaseDbColumnCategoryItem, MixedCaseFieldCategoryItem, ModelA, ModelB, ModelC, ModelD, MyObject, NamedCategory, Node, Note, NullableName, Number, ObjectA, ObjectB, ObjectC, OneToOneCategory, Order, OrderItem, Page, Paragraph, Person, Plaything, PointerA, Program, ProxyCategory, ProxyObjectA, ProxyObjectB, Ranking, Related, RelatedIndividual, RelatedObject, Report, ReportComment, ReservedName, Responsibility, School, SharedConnection, SimpleCategory, SingleObject, SpecialCategory, Staff, StaffUser, Student, Tag, Task, Teacher, Ticket21203Child, Ticket21203Parent, Ticket23605A, Ticket23605B, Ticket23605C, TvChef, Valid, X, ) class Queries1Tests(TestCase): @classmethod def setUpTestData(cls): generic = NamedCategory.objects.create(name="Generic") cls.t1 = Tag.objects.create(name='t1', category=generic) cls.t2 = Tag.objects.create(name='t2', parent=cls.t1, category=generic) cls.t3 = Tag.objects.create(name='t3', parent=cls.t1) t4 = Tag.objects.create(name='t4', parent=cls.t3) cls.t5 = Tag.objects.create(name='t5', parent=cls.t3) cls.n1 = Note.objects.create(note='n1', misc='foo', id=1) cls.n2 = Note.objects.create(note='n2', misc='bar', id=2) cls.n3 = Note.objects.create(note='n3', misc='foo', id=3) ann1 = Annotation.objects.create(name='a1', tag=cls.t1) ann1.notes.add(cls.n1) ann2 = Annotation.objects.create(name='a2', tag=t4) ann2.notes.add(cls.n2, cls.n3) # Create these out of order so that sorting by 'id' will be different to sorting # by 'info'. Helps detect some problems later. 
cls.e2 = ExtraInfo.objects.create(info='e2', note=cls.n2, value=41) e1 = ExtraInfo.objects.create(info='e1', note=cls.n1, value=42) cls.a1 = Author.objects.create(name='a1', num=1001, extra=e1) cls.a2 = Author.objects.create(name='a2', num=2002, extra=e1) a3 = Author.objects.create(name='a3', num=3003, extra=cls.e2) cls.a4 = Author.objects.create(name='a4', num=4004, extra=cls.e2) cls.time1 = datetime.datetime(2007, 12, 19, 22, 25, 0) cls.time2 = datetime.datetime(2007, 12, 19, 21, 0, 0) time3 = datetime.datetime(2007, 12, 20, 22, 25, 0) time4 = datetime.datetime(2007, 12, 20, 21, 0, 0) cls.i1 = Item.objects.create(name='one', created=cls.time1, modified=cls.time1, creator=cls.a1, note=cls.n3) cls.i1.tags.set([cls.t1, cls.t2]) cls.i2 = Item.objects.create(name='two', created=cls.time2, creator=cls.a2, note=cls.n2) cls.i2.tags.set([cls.t1, cls.t3]) cls.i3 = Item.objects.create(name='three', created=time3, creator=cls.a2, note=cls.n3) i4 = Item.objects.create(name='four', created=time4, creator=cls.a4, note=cls.n3) i4.tags.set([t4]) cls.r1 = Report.objects.create(name='r1', creator=cls.a1) Report.objects.create(name='r2', creator=a3) Report.objects.create(name='r3') # Ordering by 'rank' gives us rank2, rank1, rank3. Ordering by the Meta.ordering # will be rank3, rank2, rank1. cls.rank1 = Ranking.objects.create(rank=2, author=cls.a2) Cover.objects.create(title="first", item=i4) Cover.objects.create(title="second", item=cls.i2) def test_subquery_condition(self): qs1 = Tag.objects.filter(pk__lte=0) qs2 = Tag.objects.filter(parent__in=qs1) qs3 = Tag.objects.filter(parent__in=qs2) self.assertEqual(qs3.query.subq_aliases, {'T', 'U', 'V'}) self.assertIn('v0', str(qs3.query).lower()) qs4 = qs3.filter(parent__in=qs1) self.assertEqual(qs4.query.subq_aliases, {'T', 'U', 'V'}) # It is possible to reuse U for the second subquery, no need to use W. self.assertNotIn('w0', str(qs4.query).lower()) # So, 'U0."id"' is referenced in SELECT and WHERE twice. self.assertEqual(str(qs4.query).lower().count('u0.'), 4) def test_ticket1050(self): self.assertQuerysetEqual( Item.objects.filter(tags__isnull=True), ['<Item: three>'] ) self.assertQuerysetEqual( Item.objects.filter(tags__id__isnull=True), ['<Item: three>'] ) def test_ticket1801(self): self.assertQuerysetEqual( Author.objects.filter(item=self.i2), ['<Author: a2>'] ) self.assertQuerysetEqual( Author.objects.filter(item=self.i3), ['<Author: a2>'] ) self.assertQuerysetEqual( Author.objects.filter(item=self.i2) & Author.objects.filter(item=self.i3), ['<Author: a2>'] ) def test_ticket2306(self): # Checking that no join types are "left outer" joins. query = Item.objects.filter(tags=self.t2).query self.assertNotIn(LOUTER, [x.join_type for x in query.alias_map.values()]) self.assertQuerysetEqual( Item.objects.filter(Q(tags=self.t1)).order_by('name'), ['<Item: one>', '<Item: two>'] ) self.assertQuerysetEqual( Item.objects.filter(Q(tags=self.t1)).filter(Q(tags=self.t2)), ['<Item: one>'] ) self.assertQuerysetEqual( Item.objects.filter(Q(tags=self.t1)).filter(Q(creator__name='fred') | Q(tags=self.t2)), ['<Item: one>'] ) # Each filter call is processed "at once" against a single table, so this is # different from the previous example as it tries to find tags that are two # things at once (rather than two tags). 
        self.assertQuerysetEqual(
            Item.objects.filter(Q(tags=self.t1) & Q(tags=self.t2)),
            []
        )
        self.assertQuerysetEqual(
            Item.objects.filter(Q(tags=self.t1), Q(creator__name='fred') | Q(tags=self.t2)),
            []
        )
        qs = Author.objects.filter(ranking__rank=2, ranking__id=self.rank1.id)
        self.assertQuerysetEqual(list(qs), ['<Author: a2>'])
        self.assertEqual(qs.query.count_active_tables(), 2)
        qs = Author.objects.filter(ranking__rank=2).filter(ranking__id=self.rank1.id)
        self.assertEqual(qs.query.count_active_tables(), 3)

    def test_ticket4464(self):
        self.assertQuerysetEqual(
            Item.objects.filter(tags=self.t1).filter(tags=self.t2),
            ['<Item: one>']
        )
        self.assertQuerysetEqual(
            Item.objects.filter(tags__in=[self.t1, self.t2]).distinct().order_by('name'),
            ['<Item: one>', '<Item: two>']
        )
        self.assertQuerysetEqual(
            Item.objects.filter(tags__in=[self.t1, self.t2]).filter(tags=self.t3),
            ['<Item: two>']
        )
        # Make sure .distinct() works with slicing (this was broken in Oracle).
        self.assertQuerysetEqual(
            Item.objects.filter(tags__in=[self.t1, self.t2]).order_by('name')[:3],
            ['<Item: one>', '<Item: one>', '<Item: two>']
        )
        self.assertQuerysetEqual(
            Item.objects.filter(tags__in=[self.t1, self.t2]).distinct().order_by('name')[:3],
            ['<Item: one>', '<Item: two>']
        )

    def test_tickets_2080_3592(self):
        self.assertQuerysetEqual(
            Author.objects.filter(item__name='one') | Author.objects.filter(name='a3'),
            ['<Author: a1>', '<Author: a3>']
        )
        self.assertQuerysetEqual(
            Author.objects.filter(Q(item__name='one') | Q(name='a3')),
            ['<Author: a1>', '<Author: a3>']
        )
        self.assertQuerysetEqual(
            Author.objects.filter(Q(name='a3') | Q(item__name='one')),
            ['<Author: a1>', '<Author: a3>']
        )
        self.assertQuerysetEqual(
            Author.objects.filter(Q(item__name='three') | Q(report__name='r3')),
            ['<Author: a2>']
        )

    def test_ticket6074(self):
        # Merging two empty result sets shouldn't leave a queryset with no constraints
        # (which would match everything).
        self.assertQuerysetEqual(Author.objects.filter(Q(id__in=[])), [])
        self.assertQuerysetEqual(
            Author.objects.filter(Q(id__in=[]) | Q(id__in=[])),
            []
        )

    def test_tickets_1878_2939(self):
        self.assertEqual(Item.objects.values('creator').distinct().count(), 3)

        # Create something with a duplicate 'name' so that we can test multi-column
        # cases (which require some tricky SQL transformations under the covers).
xx = Item(name='four', created=self.time1, creator=self.a2, note=self.n1) xx.save() self.assertEqual( Item.objects.exclude(name='two').values('creator', 'name').distinct().count(), 4 ) self.assertEqual( ( Item.objects .exclude(name='two') .extra(select={'foo': '%s'}, select_params=(1,)) .values('creator', 'name', 'foo') .distinct() .count() ), 4 ) self.assertEqual( ( Item.objects .exclude(name='two') .extra(select={'foo': '%s'}, select_params=(1,)) .values('creator', 'name') .distinct() .count() ), 4 ) xx.delete() def test_ticket7323(self): self.assertEqual(Item.objects.values('creator', 'name').count(), 4) def test_ticket2253(self): q1 = Item.objects.order_by('name') q2 = Item.objects.filter(id=self.i1.id) self.assertQuerysetEqual( q1, ['<Item: four>', '<Item: one>', '<Item: three>', '<Item: two>'] ) self.assertQuerysetEqual(q2, ['<Item: one>']) self.assertQuerysetEqual( (q1 | q2).order_by('name'), ['<Item: four>', '<Item: one>', '<Item: three>', '<Item: two>'] ) self.assertQuerysetEqual((q1 & q2).order_by('name'), ['<Item: one>']) q1 = Item.objects.filter(tags=self.t1) q2 = Item.objects.filter(note=self.n3, tags=self.t2) q3 = Item.objects.filter(creator=self.a4) self.assertQuerysetEqual( ((q1 & q2) | q3).order_by('name'), ['<Item: four>', '<Item: one>'] ) def test_order_by_tables(self): q1 = Item.objects.order_by('name') q2 = Item.objects.filter(id=self.i1.id) list(q2) combined_query = (q1 & q2).order_by('name').query self.assertEqual(len([ t for t in combined_query.alias_map if combined_query.alias_refcount[t] ]), 1) def test_order_by_join_unref(self): """ This test is related to the above one, testing that there aren't old JOINs in the query. """ qs = Celebrity.objects.order_by('greatest_fan__fan_of') self.assertIn('OUTER JOIN', str(qs.query)) qs = qs.order_by('id') self.assertNotIn('OUTER JOIN', str(qs.query)) def test_get_clears_ordering(self): """ get() should clear ordering for optimization purposes. """ with CaptureQueriesContext(connection) as captured_queries: Author.objects.order_by('name').get(pk=self.a1.pk) self.assertNotIn('order by', captured_queries[0]['sql'].lower()) def test_tickets_4088_4306(self): self.assertQuerysetEqual( Report.objects.filter(creator=1001), ['<Report: r1>'] ) self.assertQuerysetEqual( Report.objects.filter(creator__num=1001), ['<Report: r1>'] ) self.assertQuerysetEqual(Report.objects.filter(creator__id=1001), []) self.assertQuerysetEqual( Report.objects.filter(creator__id=self.a1.id), ['<Report: r1>'] ) self.assertQuerysetEqual( Report.objects.filter(creator__name='a1'), ['<Report: r1>'] ) def test_ticket4510(self): self.assertQuerysetEqual( Author.objects.filter(report__name='r1'), ['<Author: a1>'] ) def test_ticket7378(self): self.assertQuerysetEqual(self.a1.report_set.all(), ['<Report: r1>']) def test_tickets_5324_6704(self): self.assertQuerysetEqual( Item.objects.filter(tags__name='t4'), ['<Item: four>'] ) self.assertQuerysetEqual( Item.objects.exclude(tags__name='t4').order_by('name').distinct(), ['<Item: one>', '<Item: three>', '<Item: two>'] ) self.assertQuerysetEqual( Item.objects.exclude(tags__name='t4').order_by('name').distinct().reverse(), ['<Item: two>', '<Item: three>', '<Item: one>'] ) self.assertQuerysetEqual( Author.objects.exclude(item__name='one').distinct().order_by('name'), ['<Author: a2>', '<Author: a3>', '<Author: a4>'] ) # Excluding across a m2m relation when there is more than one related # object associated was problematic. 
self.assertQuerysetEqual( Item.objects.exclude(tags__name='t1').order_by('name'), ['<Item: four>', '<Item: three>'] ) self.assertQuerysetEqual( Item.objects.exclude(tags__name='t1').exclude(tags__name='t4'), ['<Item: three>'] ) # Excluding from a relation that cannot be NULL should not use outer joins. query = Item.objects.exclude(creator__in=[self.a1, self.a2]).query self.assertNotIn(LOUTER, [x.join_type for x in query.alias_map.values()]) # Similarly, when one of the joins cannot possibly, ever, involve NULL # values (Author -> ExtraInfo, in the following), it should never be # promoted to a left outer join. So the following query should only # involve one "left outer" join (Author -> Item is 0-to-many). qs = Author.objects.filter(id=self.a1.id).filter(Q(extra__note=self.n1) | Q(item__note=self.n3)) self.assertEqual( len([ x for x in qs.query.alias_map.values() if x.join_type == LOUTER and qs.query.alias_refcount[x.table_alias] ]), 1 ) # The previous changes shouldn't affect nullable foreign key joins. self.assertQuerysetEqual( Tag.objects.filter(parent__isnull=True).order_by('name'), ['<Tag: t1>'] ) self.assertQuerysetEqual( Tag.objects.exclude(parent__isnull=True).order_by('name'), ['<Tag: t2>', '<Tag: t3>', '<Tag: t4>', '<Tag: t5>'] ) self.assertQuerysetEqual( Tag.objects.exclude(Q(parent__name='t1') | Q(parent__isnull=True)).order_by('name'), ['<Tag: t4>', '<Tag: t5>'] ) self.assertQuerysetEqual( Tag.objects.exclude(Q(parent__isnull=True) | Q(parent__name='t1')).order_by('name'), ['<Tag: t4>', '<Tag: t5>'] ) self.assertQuerysetEqual( Tag.objects.exclude(Q(parent__parent__isnull=True)).order_by('name'), ['<Tag: t4>', '<Tag: t5>'] ) self.assertQuerysetEqual( Tag.objects.filter(~Q(parent__parent__isnull=True)).order_by('name'), ['<Tag: t4>', '<Tag: t5>'] ) def test_ticket2091(self): t = Tag.objects.get(name='t4') self.assertQuerysetEqual( Item.objects.filter(tags__in=[t]), ['<Item: four>'] ) def test_avoid_infinite_loop_on_too_many_subqueries(self): x = Tag.objects.filter(pk=1) local_recursion_limit = sys.getrecursionlimit() // 16 msg = 'Maximum recursion depth exceeded: too many subqueries.' with self.assertRaisesMessage(RecursionError, msg): for i in range(local_recursion_limit + 2): x = Tag.objects.filter(pk__in=x) def test_reasonable_number_of_subq_aliases(self): x = Tag.objects.filter(pk=1) for _ in range(20): x = Tag.objects.filter(pk__in=x) self.assertEqual( x.query.subq_aliases, { 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'AA', 'AB', 'AC', 'AD', 'AE', 'AF', 'AG', 'AH', 'AI', 'AJ', 'AK', 'AL', 'AM', 'AN', } ) def test_heterogeneous_qs_combination(self): # Combining querysets built on different models should behave in a well-defined # fashion. We raise an error. 
with self.assertRaisesMessage(AssertionError, 'Cannot combine queries on two different base models.'): Author.objects.all() & Tag.objects.all() with self.assertRaisesMessage(AssertionError, 'Cannot combine queries on two different base models.'): Author.objects.all() | Tag.objects.all() def test_ticket3141(self): self.assertEqual(Author.objects.extra(select={'foo': '1'}).count(), 4) self.assertEqual( Author.objects.extra(select={'foo': '%s'}, select_params=(1,)).count(), 4 ) def test_ticket2400(self): self.assertQuerysetEqual( Author.objects.filter(item__isnull=True), ['<Author: a3>'] ) self.assertQuerysetEqual( Tag.objects.filter(item__isnull=True), ['<Tag: t5>'] ) def test_ticket2496(self): self.assertQuerysetEqual( Item.objects.extra(tables=['queries_author']).select_related().order_by('name')[:1], ['<Item: four>'] ) def test_error_raised_on_filter_with_dictionary(self): with self.assertRaisesMessage(FieldError, 'Cannot parse keyword query as dict'): Note.objects.filter({'note': 'n1', 'misc': 'foo'}) def test_tickets_2076_7256(self): # Ordering on related tables should be possible, even if the table is # not otherwise involved. self.assertQuerysetEqual( Item.objects.order_by('note__note', 'name'), ['<Item: two>', '<Item: four>', '<Item: one>', '<Item: three>'] ) # Ordering on a related field should use the remote model's default # ordering as a final step. self.assertQuerysetEqual( Author.objects.order_by('extra', '-name'), ['<Author: a2>', '<Author: a1>', '<Author: a4>', '<Author: a3>'] ) # Using remote model default ordering can span multiple models (in this # case, Cover is ordered by Item's default, which uses Note's default). self.assertQuerysetEqual( Cover.objects.all(), ['<Cover: first>', '<Cover: second>'] ) # If the remote model does not have a default ordering, we order by its 'id' # field. self.assertQuerysetEqual( Item.objects.order_by('creator', 'name'), ['<Item: one>', '<Item: three>', '<Item: two>', '<Item: four>'] ) # Ordering by a many-valued attribute (e.g. a many-to-many or reverse # ForeignKey) is legal, but the results might not make sense. That # isn't Django's problem. Garbage in, garbage out. self.assertQuerysetEqual( Item.objects.filter(tags__isnull=False).order_by('tags', 'id'), ['<Item: one>', '<Item: two>', '<Item: one>', '<Item: two>', '<Item: four>'] ) # If we replace the default ordering, Django adjusts the required # tables automatically. Item normally requires a join with Note to do # the default ordering, but that isn't needed here. qs = Item.objects.order_by('name') self.assertQuerysetEqual( qs, ['<Item: four>', '<Item: one>', '<Item: three>', '<Item: two>'] ) self.assertEqual(len(qs.query.alias_map), 1) def test_tickets_2874_3002(self): qs = Item.objects.select_related().order_by('note__note', 'name') self.assertQuerysetEqual( qs, ['<Item: two>', '<Item: four>', '<Item: one>', '<Item: three>'] ) # This is also a good select_related() test because there are multiple # Note entries in the SQL. The two Note items should be different. self.assertEqual(repr(qs[0].note), '<Note: n2>') self.assertEqual(repr(qs[0].creator.extra.note), '<Note: n1>') def test_ticket3037(self): self.assertQuerysetEqual( Item.objects.filter(Q(creator__name='a3', name='two') | Q(creator__name='a4', name='four')), ['<Item: four>'] ) def test_tickets_5321_7070(self): # Ordering columns must be included in the output columns. Note that # this means results that might otherwise be distinct are not (if there # are multiple values in the ordering cols), as in this example. 
This # isn't a bug; it's a warning to be careful with the selection of # ordering columns. self.assertSequenceEqual( Note.objects.values('misc').distinct().order_by('note', '-misc'), [{'misc': 'foo'}, {'misc': 'bar'}, {'misc': 'foo'}] ) def test_ticket4358(self): # If you don't pass any fields to values(), relation fields are # returned as "foo_id" keys, not "foo". For consistency, you should be # able to pass "foo_id" in the fields list and have it work, too. We # actually allow both "foo" and "foo_id". # The *_id version is returned by default. self.assertIn('note_id', ExtraInfo.objects.values()[0]) # You can also pass it in explicitly. self.assertSequenceEqual(ExtraInfo.objects.values('note_id'), [{'note_id': 1}, {'note_id': 2}]) # ...or use the field name. self.assertSequenceEqual(ExtraInfo.objects.values('note'), [{'note': 1}, {'note': 2}]) def test_ticket6154(self): # Multiple filter statements are joined using "AND" all the time. self.assertQuerysetEqual( Author.objects.filter(id=self.a1.id).filter(Q(extra__note=self.n1) | Q(item__note=self.n3)), ['<Author: a1>'] ) self.assertQuerysetEqual( Author.objects.filter(Q(extra__note=self.n1) | Q(item__note=self.n3)).filter(id=self.a1.id), ['<Author: a1>'] ) def test_ticket6981(self): self.assertQuerysetEqual( Tag.objects.select_related('parent').order_by('name'), ['<Tag: t1>', '<Tag: t2>', '<Tag: t3>', '<Tag: t4>', '<Tag: t5>'] ) def test_ticket9926(self): self.assertQuerysetEqual( Tag.objects.select_related("parent", "category").order_by('name'), ['<Tag: t1>', '<Tag: t2>', '<Tag: t3>', '<Tag: t4>', '<Tag: t5>'] ) self.assertQuerysetEqual( Tag.objects.select_related('parent', "parent__category").order_by('name'), ['<Tag: t1>', '<Tag: t2>', '<Tag: t3>', '<Tag: t4>', '<Tag: t5>'] ) def test_tickets_6180_6203(self): # Dates with limits and/or counts self.assertEqual(Item.objects.count(), 4) self.assertEqual(Item.objects.datetimes('created', 'month').count(), 1) self.assertEqual(Item.objects.datetimes('created', 'day').count(), 2) self.assertEqual(len(Item.objects.datetimes('created', 'day')), 2) self.assertEqual(Item.objects.datetimes('created', 'day')[0], datetime.datetime(2007, 12, 19, 0, 0)) def test_tickets_7087_12242(self): # Dates with extra select columns self.assertQuerysetEqual( Item.objects.datetimes('created', 'day').extra(select={'a': 1}), ['datetime.datetime(2007, 12, 19, 0, 0)', 'datetime.datetime(2007, 12, 20, 0, 0)'] ) self.assertQuerysetEqual( Item.objects.extra(select={'a': 1}).datetimes('created', 'day'), ['datetime.datetime(2007, 12, 19, 0, 0)', 'datetime.datetime(2007, 12, 20, 0, 0)'] ) name = "one" self.assertQuerysetEqual( Item.objects.datetimes('created', 'day').extra(where=['name=%s'], params=[name]), ['datetime.datetime(2007, 12, 19, 0, 0)'] ) self.assertQuerysetEqual( Item.objects.extra(where=['name=%s'], params=[name]).datetimes('created', 'day'), ['datetime.datetime(2007, 12, 19, 0, 0)'] ) def test_ticket7155(self): # Nullable dates self.assertQuerysetEqual( Item.objects.datetimes('modified', 'day'), ['datetime.datetime(2007, 12, 19, 0, 0)'] ) def test_ticket7098(self): # Make sure semi-deprecated ordering by related models syntax still # works. self.assertSequenceEqual( Item.objects.values('note__note').order_by('queries_note.note', 'id'), [{'note__note': 'n2'}, {'note__note': 'n3'}, {'note__note': 'n3'}, {'note__note': 'n3'}] ) def test_ticket7096(self): # Make sure exclude() with multiple conditions continues to work. 
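# (Key point: exclude(x=..., y=...) negates the conjunction as a whole, # i.e. NOT (x AND y), rather than NOT x AND NOT y, so rows matching only one # of the two conditions still appear in the results below.)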
self.assertQuerysetEqual( Tag.objects.filter(parent=self.t1, name='t3').order_by('name'), ['<Tag: t3>'] ) self.assertQuerysetEqual( Tag.objects.exclude(parent=self.t1, name='t3').order_by('name'), ['<Tag: t1>', '<Tag: t2>', '<Tag: t4>', '<Tag: t5>'] ) self.assertQuerysetEqual( Item.objects.exclude(tags__name='t1', name='one').order_by('name').distinct(), ['<Item: four>', '<Item: three>', '<Item: two>'] ) self.assertQuerysetEqual( Item.objects.filter(name__in=['three', 'four']).exclude(tags__name='t1').order_by('name'), ['<Item: four>', '<Item: three>'] ) # More twisted cases, involving nested negations. self.assertQuerysetEqual( Item.objects.exclude(~Q(tags__name='t1', name='one')), ['<Item: one>'] ) self.assertQuerysetEqual( Item.objects.filter(~Q(tags__name='t1', name='one'), name='two'), ['<Item: two>'] ) self.assertQuerysetEqual( Item.objects.exclude(~Q(tags__name='t1', name='one'), name='two'), ['<Item: four>', '<Item: one>', '<Item: three>'] ) def test_tickets_7204_7506(self): # Make sure querysets with related fields can be pickled. If this # doesn't crash, it's a Good Thing. pickle.dumps(Item.objects.all()) def test_ticket7813(self): # We should also be able to pickle things that use select_related(). # The only tricky thing here is to ensure that we do the related # selections properly after unpickling. qs = Item.objects.select_related() query = qs.query.get_compiler(qs.db).as_sql()[0] query2 = pickle.loads(pickle.dumps(qs.query)) self.assertEqual( query2.get_compiler(qs.db).as_sql()[0], query ) def test_deferred_load_qs_pickling(self): # Check pickling of deferred-loading querysets qs = Item.objects.defer('name', 'creator') q2 = pickle.loads(pickle.dumps(qs)) self.assertEqual(list(qs), list(q2)) q3 = pickle.loads(pickle.dumps(qs, pickle.HIGHEST_PROTOCOL)) self.assertEqual(list(qs), list(q3)) def test_ticket7277(self): self.assertQuerysetEqual( self.n1.annotation_set.filter( Q(tag=self.t5) | Q(tag__children=self.t5) | Q(tag__children__children=self.t5) ), ['<Annotation: a1>'] ) def test_tickets_7448_7707(self): # Complex objects should be converted to strings before being used in # lookups. self.assertQuerysetEqual( Item.objects.filter(created__in=[self.time1, self.time2]), ['<Item: one>', '<Item: two>'] ) def test_ticket7235(self): # An EmptyQuerySet should not raise exceptions if it is filtered. Eaten.objects.create(meal='m') q = Eaten.objects.none() with self.assertNumQueries(0): self.assertQuerysetEqual(q.all(), []) self.assertQuerysetEqual(q.filter(meal='m'), []) self.assertQuerysetEqual(q.exclude(meal='m'), []) self.assertQuerysetEqual(q.complex_filter({'pk': 1}), []) self.assertQuerysetEqual(q.select_related('food'), []) self.assertQuerysetEqual(q.annotate(Count('food')), []) self.assertQuerysetEqual(q.order_by('meal', 'food'), []) self.assertQuerysetEqual(q.distinct(), []) self.assertQuerysetEqual( q.extra(select={'foo': "1"}), [] ) self.assertQuerysetEqual(q.reverse(), []) q.query.low_mark = 1 with self.assertRaisesMessage(AssertionError, 'Cannot change a query once a slice has been taken'): q.extra(select={'foo': "1"}) self.assertQuerysetEqual(q.defer('meal'), []) self.assertQuerysetEqual(q.only('meal'), []) def test_ticket7791(self): # There were "issues" when ordering and distinct-ing on fields related # via ForeignKeys. self.assertEqual( len(Note.objects.order_by('extrainfo__info').distinct()), 3 ) # Pickling of QuerySets using datetimes() should work. 
qs = Item.objects.datetimes('created', 'month') pickle.loads(pickle.dumps(qs)) def test_ticket9997(self): # If a ValuesList or Values queryset is passed as an inner query, we # make sure it's only requesting a single value and use that as the # thing to select. self.assertQuerysetEqual( Tag.objects.filter(name__in=Tag.objects.filter(parent=self.t1).values('name')), ['<Tag: t2>', '<Tag: t3>'] ) # Multi-valued values() and values_list() querysets should raise errors. with self.assertRaisesMessage(TypeError, 'Cannot use multi-field values as a filter value.'): Tag.objects.filter(name__in=Tag.objects.filter(parent=self.t1).values('name', 'id')) with self.assertRaisesMessage(TypeError, 'Cannot use multi-field values as a filter value.'): Tag.objects.filter(name__in=Tag.objects.filter(parent=self.t1).values_list('name', 'id')) def test_ticket9985(self): # qs.values_list(...).values(...) combinations should work. self.assertSequenceEqual( Note.objects.values_list("note", flat=True).values("id").order_by("id"), [{'id': 1}, {'id': 2}, {'id': 3}] ) self.assertQuerysetEqual( Annotation.objects.filter(notes__in=Note.objects.filter(note="n1").values_list('note').values('id')), ['<Annotation: a1>'] ) def test_ticket10205(self): # When bailing out early because of an empty "__in" filter, we need # to set things up correctly internally so that subqueries can continue properly. self.assertEqual(Tag.objects.filter(name__in=()).update(name="foo"), 0) def test_ticket10432(self): # Testing an empty "__in" filter with a generator as the value. def f(): return iter([]) n_obj = Note.objects.all()[0] def g(): yield n_obj.pk self.assertQuerysetEqual(Note.objects.filter(pk__in=f()), []) self.assertEqual(list(Note.objects.filter(pk__in=g())), [n_obj]) def test_ticket10742(self): # Queries used in an __in clause don't execute subqueries subq = Author.objects.filter(num__lt=3000) qs = Author.objects.filter(pk__in=subq) self.assertQuerysetEqual(qs, ['<Author: a1>', '<Author: a2>']) # The subquery result cache should not be populated self.assertIsNone(subq._result_cache) subq = Author.objects.filter(num__lt=3000) qs = Author.objects.exclude(pk__in=subq) self.assertQuerysetEqual(qs, ['<Author: a3>', '<Author: a4>']) # The subquery result cache should not be populated self.assertIsNone(subq._result_cache) subq = Author.objects.filter(num__lt=3000) self.assertQuerysetEqual( Author.objects.filter(Q(pk__in=subq) & Q(name='a1')), ['<Author: a1>'] ) # The subquery result cache should not be populated self.assertIsNone(subq._result_cache) def test_ticket7076(self): # Excluding shouldn't eliminate NULL entries. self.assertQuerysetEqual( Item.objects.exclude(modified=self.time1).order_by('name'), ['<Item: four>', '<Item: three>', '<Item: two>'] ) self.assertQuerysetEqual( Tag.objects.exclude(parent__name=self.t1.name), ['<Tag: t1>', '<Tag: t4>', '<Tag: t5>'] ) def test_ticket7181(self): # Ordering by related tables should accommodate nullable fields (this # test is a little tricky, since NULL ordering is database dependent. # Instead, we just count the number of results). self.assertEqual(len(Tag.objects.order_by('parent__name')), 5) # Empty querysets can be merged with others. 
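# (A none() queryset is marked empty, so the combinations below should # short-circuit: qs | none() can simply return qs's rows and qs & none() an # empty result, without the empty side ever reaching the database.)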
self.assertQuerysetEqual( Note.objects.none() | Note.objects.all(), ['<Note: n1>', '<Note: n2>', '<Note: n3>'] ) self.assertQuerysetEqual( Note.objects.all() | Note.objects.none(), ['<Note: n1>', '<Note: n2>', '<Note: n3>'] ) self.assertQuerysetEqual(Note.objects.none() & Note.objects.all(), []) self.assertQuerysetEqual(Note.objects.all() & Note.objects.none(), []) def test_ticket9411(self): # Make sure bump_prefix() (an internal Query method) doesn't (re-)break. It's # sufficient that this query runs without error. qs = Tag.objects.values_list('id', flat=True).order_by('id') qs.query.bump_prefix(qs.query) first = qs[0] self.assertEqual(list(qs), list(range(first, first + 5))) def test_ticket8439(self): # Complex combinations of conjunctions, disjunctions and nullable # relations. self.assertQuerysetEqual( Author.objects.filter(Q(item__note__extrainfo=self.e2) | Q(report=self.r1, name='xyz')), ['<Author: a2>'] ) self.assertQuerysetEqual( Author.objects.filter(Q(report=self.r1, name='xyz') | Q(item__note__extrainfo=self.e2)), ['<Author: a2>'] ) self.assertQuerysetEqual( Annotation.objects.filter(Q(tag__parent=self.t1) | Q(notes__note='n1', name='a1')), ['<Annotation: a1>'] ) xx = ExtraInfo.objects.create(info='xx', note=self.n3) self.assertQuerysetEqual( Note.objects.filter(Q(extrainfo__author=self.a1) | Q(extrainfo=xx)), ['<Note: n1>', '<Note: n3>'] ) q = Note.objects.filter(Q(extrainfo__author=self.a1) | Q(extrainfo=xx)).query self.assertEqual( len([x for x in q.alias_map.values() if x.join_type == LOUTER and q.alias_refcount[x.table_alias]]), 1 ) def test_ticket17429(self): """ Meta.ordering=None works the same as Meta.ordering=[] """ original_ordering = Tag._meta.ordering Tag._meta.ordering = None try: self.assertQuerysetEqual( Tag.objects.all(), ['<Tag: t1>', '<Tag: t2>', '<Tag: t3>', '<Tag: t4>', '<Tag: t5>'], ordered=False ) finally: Tag._meta.ordering = original_ordering def test_exclude(self): self.assertQuerysetEqual( Item.objects.exclude(tags__name='t4'), [repr(i) for i in Item.objects.filter(~Q(tags__name='t4'))]) self.assertQuerysetEqual( Item.objects.exclude(Q(tags__name='t4') | Q(tags__name='t3')), [repr(i) for i in Item.objects.filter(~(Q(tags__name='t4') | Q(tags__name='t3')))]) self.assertQuerysetEqual( Item.objects.exclude(Q(tags__name='t4') | ~Q(tags__name='t3')), [repr(i) for i in Item.objects.filter(~(Q(tags__name='t4') | ~Q(tags__name='t3')))]) def test_nested_exclude(self): self.assertQuerysetEqual( Item.objects.exclude(~Q(tags__name='t4')), [repr(i) for i in Item.objects.filter(~~Q(tags__name='t4'))]) def test_double_exclude(self): self.assertQuerysetEqual( Item.objects.filter(Q(tags__name='t4')), [repr(i) for i in Item.objects.filter(~~Q(tags__name='t4'))]) self.assertQuerysetEqual( Item.objects.filter(Q(tags__name='t4')), [repr(i) for i in Item.objects.filter(~Q(~Q(tags__name='t4')))]) def test_exclude_in(self): self.assertQuerysetEqual( Item.objects.exclude(Q(tags__name__in=['t4', 't3'])), [repr(i) for i in Item.objects.filter(~Q(tags__name__in=['t4', 't3']))]) self.assertQuerysetEqual( Item.objects.filter(Q(tags__name__in=['t4', 't3'])), [repr(i) for i in Item.objects.filter(~~Q(tags__name__in=['t4', 't3']))]) def test_ticket_10790_1(self): # Querying direct fields with isnull should trim the left outer join. # It also should not create INNER JOIN. 
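# (For instance, the first queryset below should compile to roughly # SELECT ... FROM queries_tag WHERE parent_id IS NULL, with no JOIN at all, # since parent__isnull only needs the local foreign key column; a sketch, # the exact quoting and column list are backend-dependent.)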
q = Tag.objects.filter(parent__isnull=True) self.assertQuerysetEqual(q, ['<Tag: t1>']) self.assertNotIn('JOIN', str(q.query)) q = Tag.objects.filter(parent__isnull=False) self.assertQuerysetEqual( q, ['<Tag: t2>', '<Tag: t3>', '<Tag: t4>', '<Tag: t5>'], ) self.assertNotIn('JOIN', str(q.query)) q = Tag.objects.exclude(parent__isnull=True) self.assertQuerysetEqual( q, ['<Tag: t2>', '<Tag: t3>', '<Tag: t4>', '<Tag: t5>'], ) self.assertNotIn('JOIN', str(q.query)) q = Tag.objects.exclude(parent__isnull=False) self.assertQuerysetEqual(q, ['<Tag: t1>']) self.assertNotIn('JOIN', str(q.query)) q = Tag.objects.exclude(parent__parent__isnull=False) self.assertQuerysetEqual( q, ['<Tag: t1>', '<Tag: t2>', '<Tag: t3>'], ) self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 1) self.assertNotIn('INNER JOIN', str(q.query)) def test_ticket_10790_2(self): # Querying across several tables should strip only the last outer join, # while preserving the preceding inner joins. q = Tag.objects.filter(parent__parent__isnull=False) self.assertQuerysetEqual( q, ['<Tag: t4>', '<Tag: t5>'], ) self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 0) self.assertEqual(str(q.query).count('INNER JOIN'), 1) # Querying without isnull should not convert anything to left outer join. q = Tag.objects.filter(parent__parent=self.t1) self.assertQuerysetEqual( q, ['<Tag: t4>', '<Tag: t5>'], ) self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 0) self.assertEqual(str(q.query).count('INNER JOIN'), 1) def test_ticket_10790_3(self): # Querying via indirect fields should populate the left outer join q = NamedCategory.objects.filter(tag__isnull=True) self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 1) # join to dumbcategory ptr_id self.assertEqual(str(q.query).count('INNER JOIN'), 1) self.assertQuerysetEqual(q, []) # Querying across several tables should strip only the last join, while # preserving the preceding left outer joins. q = NamedCategory.objects.filter(tag__parent__isnull=True) self.assertEqual(str(q.query).count('INNER JOIN'), 1) self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 1) self.assertQuerysetEqual(q, ['<NamedCategory: Generic>']) def test_ticket_10790_4(self): # Querying across m2m field should not strip the m2m table from join. 
q = Author.objects.filter(item__tags__isnull=True) self.assertQuerysetEqual( q, ['<Author: a2>', '<Author: a3>'], ) self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 2) self.assertNotIn('INNER JOIN', str(q.query)) q = Author.objects.filter(item__tags__parent__isnull=True) self.assertQuerysetEqual( q, ['<Author: a1>', '<Author: a2>', '<Author: a2>', '<Author: a3>'], ) self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 3) self.assertNotIn('INNER JOIN', str(q.query)) def test_ticket_10790_5(self): # Querying with isnull=False across m2m field should not create outer joins q = Author.objects.filter(item__tags__isnull=False) self.assertQuerysetEqual( q, ['<Author: a1>', '<Author: a1>', '<Author: a2>', '<Author: a2>', '<Author: a4>'] ) self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 0) self.assertEqual(str(q.query).count('INNER JOIN'), 2) q = Author.objects.filter(item__tags__parent__isnull=False) self.assertQuerysetEqual( q, ['<Author: a1>', '<Author: a2>', '<Author: a4>'] ) self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 0) self.assertEqual(str(q.query).count('INNER JOIN'), 3) q = Author.objects.filter(item__tags__parent__parent__isnull=False) self.assertQuerysetEqual( q, ['<Author: a4>'] ) self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 0) self.assertEqual(str(q.query).count('INNER JOIN'), 4) def test_ticket_10790_6(self): # Querying with isnull=True across m2m field should not create inner joins # and strip last outer join q = Author.objects.filter(item__tags__parent__parent__isnull=True) self.assertQuerysetEqual( q, ['<Author: a1>', '<Author: a1>', '<Author: a2>', '<Author: a2>', '<Author: a2>', '<Author: a3>'] ) self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 4) self.assertEqual(str(q.query).count('INNER JOIN'), 0) q = Author.objects.filter(item__tags__parent__isnull=True) self.assertQuerysetEqual( q, ['<Author: a1>', '<Author: a2>', '<Author: a2>', '<Author: a3>'] ) self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 3) self.assertEqual(str(q.query).count('INNER JOIN'), 0) def test_ticket_10790_7(self): # Reverse querying with isnull should not strip the join q = Author.objects.filter(item__isnull=True) self.assertQuerysetEqual( q, ['<Author: a3>'] ) self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 1) self.assertEqual(str(q.query).count('INNER JOIN'), 0) q = Author.objects.filter(item__isnull=False) self.assertQuerysetEqual( q, ['<Author: a1>', '<Author: a2>', '<Author: a2>', '<Author: a4>'] ) self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 0) self.assertEqual(str(q.query).count('INNER JOIN'), 1) def test_ticket_10790_8(self): # Querying with combined q-objects should also strip the left outer join q = Tag.objects.filter(Q(parent__isnull=True) | Q(parent=self.t1)) self.assertQuerysetEqual( q, ['<Tag: t1>', '<Tag: t2>', '<Tag: t3>'] ) self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 0) self.assertEqual(str(q.query).count('INNER JOIN'), 0) def test_ticket_10790_combine(self): # Combining queries should not re-populate the left outer join q1 = Tag.objects.filter(parent__isnull=True) q2 = Tag.objects.filter(parent__isnull=False) q3 = q1 | q2 self.assertQuerysetEqual( q3, ['<Tag: t1>', '<Tag: t2>', '<Tag: t3>', '<Tag: t4>', '<Tag: t5>'], ) self.assertEqual(str(q3.query).count('LEFT OUTER JOIN'), 0) self.assertEqual(str(q3.query).count('INNER JOIN'), 0) q3 = q1 & q2 self.assertQuerysetEqual(q3, []) self.assertEqual(str(q3.query).count('LEFT OUTER JOIN'), 0) self.assertEqual(str(q3.query).count('INNER JOIN'), 0) q2 = 
Tag.objects.filter(parent=self.t1) q3 = q1 | q2 self.assertQuerysetEqual( q3, ['<Tag: t1>', '<Tag: t2>', '<Tag: t3>'] ) self.assertEqual(str(q3.query).count('LEFT OUTER JOIN'), 0) self.assertEqual(str(q3.query).count('INNER JOIN'), 0) q3 = q2 | q1 self.assertQuerysetEqual( q3, ['<Tag: t1>', '<Tag: t2>', '<Tag: t3>'] ) self.assertEqual(str(q3.query).count('LEFT OUTER JOIN'), 0) self.assertEqual(str(q3.query).count('INNER JOIN'), 0) q1 = Tag.objects.filter(parent__isnull=True) q2 = Tag.objects.filter(parent__parent__isnull=True) q3 = q1 | q2 self.assertQuerysetEqual( q3, ['<Tag: t1>', '<Tag: t2>', '<Tag: t3>'] ) self.assertEqual(str(q3.query).count('LEFT OUTER JOIN'), 1) self.assertEqual(str(q3.query).count('INNER JOIN'), 0) q3 = q2 | q1 self.assertQuerysetEqual( q3, ['<Tag: t1>', '<Tag: t2>', '<Tag: t3>'] ) self.assertEqual(str(q3.query).count('LEFT OUTER JOIN'), 1) self.assertEqual(str(q3.query).count('INNER JOIN'), 0) def test_ticket19672(self): self.assertQuerysetEqual( Report.objects.filter(Q(creator__isnull=False) & ~Q(creator__extra__value=41)), ['<Report: r1>'] ) def test_ticket_20250(self): # A negated Q along with an annotated queryset failed in Django 1.4 qs = Author.objects.annotate(Count('item')) qs = qs.filter(~Q(extra__value=0)).order_by('name') self.assertIn('SELECT', str(qs.query)) self.assertQuerysetEqual( qs, ['<Author: a1>', '<Author: a2>', '<Author: a3>', '<Author: a4>'] ) def test_lookup_constraint_fielderror(self): msg = ( "Cannot resolve keyword 'unknown_field' into field. Choices are: " "annotation, category, category_id, children, id, item, " "managedmodel, name, note, parent, parent_id" ) with self.assertRaisesMessage(FieldError, msg): Tag.objects.filter(unknown_field__name='generic') def test_common_mixed_case_foreign_keys(self): """ Valid query should be generated when fields fetched from joined tables include FKs whose names only differ by case. """ c1 = SimpleCategory.objects.create(name='c1') c2 = SimpleCategory.objects.create(name='c2') c3 = SimpleCategory.objects.create(name='c3') category = CategoryItem.objects.create(category=c1) mixed_case_field_category = MixedCaseFieldCategoryItem.objects.create(CaTeGoRy=c2) mixed_case_db_column_category = MixedCaseDbColumnCategoryItem.objects.create(category=c3) CommonMixedCaseForeignKeys.objects.create( category=category, mixed_case_field_category=mixed_case_field_category, mixed_case_db_column_category=mixed_case_db_column_category, ) qs = CommonMixedCaseForeignKeys.objects.values( 'category', 'mixed_case_field_category', 'mixed_case_db_column_category', 'category__category', 'mixed_case_field_category__CaTeGoRy', 'mixed_case_db_column_category__category', ) self.assertTrue(qs.first()) def test_excluded_intermediary_m2m_table_joined(self): self.assertSequenceEqual( Note.objects.filter(~Q(tag__annotation__name=F('note'))), [self.n1, self.n2, self.n3], ) self.assertSequenceEqual( Note.objects.filter(tag__annotation__name='a1').filter(~Q(tag__annotation__name=F('note'))), [], ) class Queries2Tests(TestCase): @classmethod def setUpTestData(cls): Number.objects.create(num=4) Number.objects.create(num=8) Number.objects.create(num=12) def test_ticket4289(self): # A slight variation on restricting the filtering choices by the # lookup constraints.
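# (The fixture rows are 4, 8 and 12, so for example num__gt=8 combined with # num__lt=12 matches nothing, while num__gt=8 with num__lt=13 matches only # 12; the Q combinations below must preserve exactly those bounds.)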
self.assertQuerysetEqual(Number.objects.filter(num__lt=4), []) self.assertQuerysetEqual(Number.objects.filter(num__gt=8, num__lt=12), []) self.assertQuerysetEqual( Number.objects.filter(num__gt=8, num__lt=13), ['<Number: 12>'] ) self.assertQuerysetEqual( Number.objects.filter(Q(num__lt=4) | Q(num__gt=8, num__lt=12)), [] ) self.assertQuerysetEqual( Number.objects.filter(Q(num__gt=8, num__lt=12) | Q(num__lt=4)), [] ) self.assertQuerysetEqual( Number.objects.filter(Q(num__gt=8) & Q(num__lt=12) | Q(num__lt=4)), [] ) self.assertQuerysetEqual( Number.objects.filter(Q(num__gt=7) & Q(num__lt=12) | Q(num__lt=4)), ['<Number: 8>'] ) def test_ticket12239(self): # Custom lookups are registered to round float values correctly on gte # and lt IntegerField queries. self.assertQuerysetEqual( Number.objects.filter(num__gt=11.9), ['<Number: 12>'] ) self.assertQuerysetEqual(Number.objects.filter(num__gt=12), []) self.assertQuerysetEqual(Number.objects.filter(num__gt=12.0), []) self.assertQuerysetEqual(Number.objects.filter(num__gt=12.1), []) self.assertQuerysetEqual( Number.objects.filter(num__lt=12), ['<Number: 4>', '<Number: 8>'], ordered=False ) self.assertQuerysetEqual( Number.objects.filter(num__lt=12.0), ['<Number: 4>', '<Number: 8>'], ordered=False ) self.assertQuerysetEqual( Number.objects.filter(num__lt=12.1), ['<Number: 4>', '<Number: 8>', '<Number: 12>'], ordered=False ) self.assertQuerysetEqual( Number.objects.filter(num__gte=11.9), ['<Number: 12>'] ) self.assertQuerysetEqual( Number.objects.filter(num__gte=12), ['<Number: 12>'] ) self.assertQuerysetEqual( Number.objects.filter(num__gte=12.0), ['<Number: 12>'] ) self.assertQuerysetEqual(Number.objects.filter(num__gte=12.1), []) self.assertQuerysetEqual(Number.objects.filter(num__gte=12.9), []) self.assertQuerysetEqual( Number.objects.filter(num__lte=11.9), ['<Number: 4>', '<Number: 8>'], ordered=False ) self.assertQuerysetEqual( Number.objects.filter(num__lte=12), ['<Number: 4>', '<Number: 8>', '<Number: 12>'], ordered=False ) self.assertQuerysetEqual( Number.objects.filter(num__lte=12.0), ['<Number: 4>', '<Number: 8>', '<Number: 12>'], ordered=False ) self.assertQuerysetEqual( Number.objects.filter(num__lte=12.1), ['<Number: 4>', '<Number: 8>', '<Number: 12>'], ordered=False ) self.assertQuerysetEqual( Number.objects.filter(num__lte=12.9), ['<Number: 4>', '<Number: 8>', '<Number: 12>'], ordered=False ) def test_ticket7759(self): # Count should work with a partially read result set. count = Number.objects.count() qs = Number.objects.all() def run(): for obj in qs: return qs.count() == count self.assertTrue(run()) class Queries3Tests(TestCase): def test_ticket7107(self): # This shouldn't create an infinite loop. self.assertQuerysetEqual(Valid.objects.all(), []) def test_ticket8683(self): # An error should be raised when QuerySet.datetimes() is passed the # wrong type of field. 
with self.assertRaisesMessage(AssertionError, "'name' isn't a DateField, TimeField, or DateTimeField."): Item.objects.datetimes('name', 'month') def test_ticket22023(self): with self.assertRaisesMessage(TypeError, "Cannot call only() after .values() or .values_list()"): Valid.objects.values().only() with self.assertRaisesMessage(TypeError, "Cannot call defer() after .values() or .values_list()"): Valid.objects.values().defer() class Queries4Tests(TestCase): @classmethod def setUpTestData(cls): generic = NamedCategory.objects.create(name="Generic") cls.t1 = Tag.objects.create(name='t1', category=generic) n1 = Note.objects.create(note='n1', misc='foo') n2 = Note.objects.create(note='n2', misc='bar') e1 = ExtraInfo.objects.create(info='e1', note=n1) e2 = ExtraInfo.objects.create(info='e2', note=n2) cls.a1 = Author.objects.create(name='a1', num=1001, extra=e1) cls.a3 = Author.objects.create(name='a3', num=3003, extra=e2) cls.r1 = Report.objects.create(name='r1', creator=cls.a1) cls.r2 = Report.objects.create(name='r2', creator=cls.a3) cls.r3 = Report.objects.create(name='r3') Item.objects.create(name='i1', created=datetime.datetime.now(), note=n1, creator=cls.a1) Item.objects.create(name='i2', created=datetime.datetime.now(), note=n1, creator=cls.a3) def test_ticket24525(self): tag = Tag.objects.create() anth100 = tag.note_set.create(note='ANTH', misc='100') math101 = tag.note_set.create(note='MATH', misc='101') s1 = tag.annotation_set.create(name='1') s2 = tag.annotation_set.create(name='2') s1.notes.set([math101, anth100]) s2.notes.set([math101]) result = math101.annotation_set.all() & tag.annotation_set.exclude(notes__in=[anth100]) self.assertEqual(list(result), [s2]) def test_ticket11811(self): unsaved_category = NamedCategory(name="Other") msg = 'Unsaved model instance <NamedCategory: Other> cannot be used in an ORM query.' with self.assertRaisesMessage(ValueError, msg): Tag.objects.filter(pk=self.t1.pk).update(category=unsaved_category) def test_ticket14876(self): # Note: when combining the query we need to have information available # about the join type of the trimmed "creator__isnull" join. If we # don't have that information, then the join is created as INNER JOIN # and results will be incorrect. 
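# (Concretely: r3 was created without a creator, so it only survives the # ORed queries below if the trimmed creator join stays LEFT OUTER when the # two halves are combined; recreating it as INNER would silently drop r3.)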
q1 = Report.objects.filter(Q(creator__isnull=True) | Q(creator__extra__info='e1')) q2 = Report.objects.filter(Q(creator__isnull=True)) | Report.objects.filter(Q(creator__extra__info='e1')) self.assertQuerysetEqual(q1, ["<Report: r1>", "<Report: r3>"], ordered=False) self.assertEqual(str(q1.query), str(q2.query)) q1 = Report.objects.filter(Q(creator__extra__info='e1') | Q(creator__isnull=True)) q2 = Report.objects.filter(Q(creator__extra__info='e1')) | Report.objects.filter(Q(creator__isnull=True)) self.assertQuerysetEqual(q1, ["<Report: r1>", "<Report: r3>"], ordered=False) self.assertEqual(str(q1.query), str(q2.query)) q1 = Item.objects.filter(Q(creator=self.a1) | Q(creator__report__name='r1')).order_by() q2 = ( Item.objects .filter(Q(creator=self.a1)).order_by() | Item.objects.filter(Q(creator__report__name='r1')) .order_by() ) self.assertQuerysetEqual(q1, ["<Item: i1>"]) self.assertEqual(str(q1.query), str(q2.query)) q1 = Item.objects.filter(Q(creator__report__name='e1') | Q(creator=self.a1)).order_by() q2 = ( Item.objects.filter(Q(creator__report__name='e1')).order_by() | Item.objects.filter(Q(creator=self.a1)).order_by() ) self.assertQuerysetEqual(q1, ["<Item: i1>"]) self.assertEqual(str(q1.query), str(q2.query)) def test_combine_join_reuse(self): # Joins having identical connections are correctly recreated in the # rhs query, in case the query is ORed together (#18748). Report.objects.create(name='r4', creator=self.a1) q1 = Author.objects.filter(report__name='r5') q2 = Author.objects.filter(report__name='r4').filter(report__name='r1') combined = q1 | q2 self.assertEqual(str(combined.query).count('JOIN'), 2) self.assertEqual(len(combined), 1) self.assertEqual(combined[0].name, 'a1') def test_join_reuse_order(self): # Join aliases are reused in order. This shouldn't raise AssertionError # because change_map contains a circular reference (#26522). s1 = School.objects.create() s2 = School.objects.create() s3 = School.objects.create() t1 = Teacher.objects.create() otherteachers = Teacher.objects.exclude(pk=t1.pk).exclude(friends=t1) qs1 = otherteachers.filter(schools=s1).filter(schools=s2) qs2 = otherteachers.filter(schools=s1).filter(schools=s3) self.assertQuerysetEqual(qs1 | qs2, []) def test_ticket7095(self): # Updates that are filtered on the model being updated are somewhat # tricky in MySQL. ManagedModel.objects.create(data='mm1', tag=self.t1, public=True) self.assertEqual(ManagedModel.objects.update(data='mm'), 1) # A values() or values_list() query across joined models must use outer # joins appropriately. # Note: In Oracle, we expect a null CharField to return '' instead of # None. if connection.features.interprets_empty_strings_as_nulls: expected_null_charfield_repr = '' else: expected_null_charfield_repr = None self.assertSequenceEqual( Report.objects.values_list("creator__extra__info", flat=True).order_by("name"), ['e1', 'e2', expected_null_charfield_repr], ) # Similarly for select_related(), joins beyond an initial nullable join # must use outer joins so that all results are included. self.assertQuerysetEqual( Report.objects.select_related("creator", "creator__extra").order_by("name"), ['<Report: r1>', '<Report: r2>', '<Report: r3>'] ) # When there are multiple paths to a table from another table, we have # to be careful not to accidentally reuse an inappropriate join when # using select_related(). We used to return the parent's Detail record # here by mistake. 
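# (Concretely, in the code below: m1.children joins Child -> person (which is # m2) -> details, so obj.person.details must be m2's d2; reusing the join # belonging to m1's own details relation would wrongly yield d1.)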
d1 = Detail.objects.create(data="d1") d2 = Detail.objects.create(data="d2") m1 = Member.objects.create(name="m1", details=d1) m2 = Member.objects.create(name="m2", details=d2) Child.objects.create(person=m2, parent=m1) obj = m1.children.select_related("person__details")[0] self.assertEqual(obj.person.details.data, 'd2') def test_order_by_resetting(self): # Calling order_by() with no parameters removes any existing ordering on the # model. But it should still be possible to add new ordering after that. qs = Author.objects.order_by().order_by('name') self.assertIn('ORDER BY', qs.query.get_compiler(qs.db).as_sql()[0]) def test_order_by_reverse_fk(self): # It is possible to order by reverse of foreign key, although that can lead # to duplicate results. c1 = SimpleCategory.objects.create(name="category1") c2 = SimpleCategory.objects.create(name="category2") CategoryItem.objects.create(category=c1) CategoryItem.objects.create(category=c2) CategoryItem.objects.create(category=c1) self.assertSequenceEqual(SimpleCategory.objects.order_by('categoryitem', 'pk'), [c1, c2, c1]) def test_filter_reverse_non_integer_pk(self): date_obj = DateTimePK.objects.create() extra_obj = ExtraInfo.objects.create(info='extra', date=date_obj) self.assertEqual( DateTimePK.objects.filter(extrainfo=extra_obj).get(), date_obj, ) def test_ticket10181(self): # Avoid raising an EmptyResultSet if an inner query is probably # empty (and hence, not executed). self.assertQuerysetEqual( Tag.objects.filter(id__in=Tag.objects.filter(id__in=[])), [] ) def test_ticket15316_filter_false(self): c1 = SimpleCategory.objects.create(name="category1") c2 = SpecialCategory.objects.create(name="named category1", special_name="special1") c3 = SpecialCategory.objects.create(name="named category2", special_name="special2") CategoryItem.objects.create(category=c1) ci2 = CategoryItem.objects.create(category=c2) ci3 = CategoryItem.objects.create(category=c3) qs = CategoryItem.objects.filter(category__specialcategory__isnull=False) self.assertEqual(qs.count(), 2) self.assertSequenceEqual(qs, [ci2, ci3]) def test_ticket15316_exclude_false(self): c1 = SimpleCategory.objects.create(name="category1") c2 = SpecialCategory.objects.create(name="named category1", special_name="special1") c3 = SpecialCategory.objects.create(name="named category2", special_name="special2") ci1 = CategoryItem.objects.create(category=c1) CategoryItem.objects.create(category=c2) CategoryItem.objects.create(category=c3) qs = CategoryItem.objects.exclude(category__specialcategory__isnull=False) self.assertEqual(qs.count(), 1) self.assertSequenceEqual(qs, [ci1]) def test_ticket15316_filter_true(self): c1 = SimpleCategory.objects.create(name="category1") c2 = SpecialCategory.objects.create(name="named category1", special_name="special1") c3 = SpecialCategory.objects.create(name="named category2", special_name="special2") ci1 = CategoryItem.objects.create(category=c1) CategoryItem.objects.create(category=c2) CategoryItem.objects.create(category=c3) qs = CategoryItem.objects.filter(category__specialcategory__isnull=True) self.assertEqual(qs.count(), 1) self.assertSequenceEqual(qs, [ci1]) def test_ticket15316_exclude_true(self): c1 = SimpleCategory.objects.create(name="category1") c2 = SpecialCategory.objects.create(name="named category1", special_name="special1") c3 = SpecialCategory.objects.create(name="named category2", special_name="special2") CategoryItem.objects.create(category=c1) ci2 = CategoryItem.objects.create(category=c2) ci3 = CategoryItem.objects.create(category=c3) qs = 
CategoryItem.objects.exclude(category__specialcategory__isnull=True) self.assertEqual(qs.count(), 2) self.assertSequenceEqual(qs, [ci2, ci3]) def test_ticket15316_one2one_filter_false(self): c = SimpleCategory.objects.create(name="cat") c0 = SimpleCategory.objects.create(name="cat0") c1 = SimpleCategory.objects.create(name="category1") OneToOneCategory.objects.create(category=c1, new_name="new1") OneToOneCategory.objects.create(category=c0, new_name="new2") CategoryItem.objects.create(category=c) ci2 = CategoryItem.objects.create(category=c0) ci3 = CategoryItem.objects.create(category=c1) qs = CategoryItem.objects.filter(category__onetoonecategory__isnull=False).order_by('pk') self.assertEqual(qs.count(), 2) self.assertSequenceEqual(qs, [ci2, ci3]) def test_ticket15316_one2one_exclude_false(self): c = SimpleCategory.objects.create(name="cat") c0 = SimpleCategory.objects.create(name="cat0") c1 = SimpleCategory.objects.create(name="category1") OneToOneCategory.objects.create(category=c1, new_name="new1") OneToOneCategory.objects.create(category=c0, new_name="new2") ci1 = CategoryItem.objects.create(category=c) CategoryItem.objects.create(category=c0) CategoryItem.objects.create(category=c1) qs = CategoryItem.objects.exclude(category__onetoonecategory__isnull=False) self.assertEqual(qs.count(), 1) self.assertSequenceEqual(qs, [ci1]) def test_ticket15316_one2one_filter_true(self): c = SimpleCategory.objects.create(name="cat") c0 = SimpleCategory.objects.create(name="cat0") c1 = SimpleCategory.objects.create(name="category1") OneToOneCategory.objects.create(category=c1, new_name="new1") OneToOneCategory.objects.create(category=c0, new_name="new2") ci1 = CategoryItem.objects.create(category=c) CategoryItem.objects.create(category=c0) CategoryItem.objects.create(category=c1) qs = CategoryItem.objects.filter(category__onetoonecategory__isnull=True) self.assertEqual(qs.count(), 1) self.assertSequenceEqual(qs, [ci1]) def test_ticket15316_one2one_exclude_true(self): c = SimpleCategory.objects.create(name="cat") c0 = SimpleCategory.objects.create(name="cat0") c1 = SimpleCategory.objects.create(name="category1") OneToOneCategory.objects.create(category=c1, new_name="new1") OneToOneCategory.objects.create(category=c0, new_name="new2") CategoryItem.objects.create(category=c) ci2 = CategoryItem.objects.create(category=c0) ci3 = CategoryItem.objects.create(category=c1) qs = CategoryItem.objects.exclude(category__onetoonecategory__isnull=True).order_by('pk') self.assertEqual(qs.count(), 2) self.assertSequenceEqual(qs, [ci2, ci3]) class Queries5Tests(TestCase): @classmethod def setUpTestData(cls): # Ordering by 'rank' gives us rank2, rank1, rank3. Ordering by the # Meta.ordering will be rank3, rank2, rank1. n1 = Note.objects.create(note='n1', misc='foo', id=1) n2 = Note.objects.create(note='n2', misc='bar', id=2) e1 = ExtraInfo.objects.create(info='e1', note=n1) e2 = ExtraInfo.objects.create(info='e2', note=n2) a1 = Author.objects.create(name='a1', num=1001, extra=e1) a2 = Author.objects.create(name='a2', num=2002, extra=e1) a3 = Author.objects.create(name='a3', num=3003, extra=e2) cls.rank1 = Ranking.objects.create(rank=2, author=a2) Ranking.objects.create(rank=1, author=a3) Ranking.objects.create(rank=3, author=a1) def test_ordering(self): # Cross model ordering is possible in Meta, too. 
self.assertQuerysetEqual( Ranking.objects.all(), ['<Ranking: 3: a1>', '<Ranking: 2: a2>', '<Ranking: 1: a3>'] ) self.assertQuerysetEqual( Ranking.objects.all().order_by('rank'), ['<Ranking: 1: a3>', '<Ranking: 2: a2>', '<Ranking: 3: a1>'] ) # Ordering of extra() pieces is possible, too and you can mix extra # fields and model fields in the ordering. self.assertQuerysetEqual( Ranking.objects.extra(tables=['django_site'], order_by=['-django_site.id', 'rank']), ['<Ranking: 1: a3>', '<Ranking: 2: a2>', '<Ranking: 3: a1>'] ) sql = 'case when %s > 2 then 1 else 0 end' % connection.ops.quote_name('rank') qs = Ranking.objects.extra(select={'good': sql}) self.assertEqual( [o.good for o in qs.extra(order_by=('-good',))], [True, False, False] ) self.assertQuerysetEqual( qs.extra(order_by=('-good', 'id')), ['<Ranking: 3: a1>', '<Ranking: 2: a2>', '<Ranking: 1: a3>'] ) # Despite having some extra aliases in the query, we can still omit # them in a values() query. dicts = qs.values('id', 'rank').order_by('id') self.assertEqual( [d['rank'] for d in dicts], [2, 1, 3] ) def test_ticket7256(self): # An empty values() call includes all aliases, including those from an # extra() sql = 'case when %s > 2 then 1 else 0 end' % connection.ops.quote_name('rank') qs = Ranking.objects.extra(select={'good': sql}) dicts = qs.values().order_by('id') for d in dicts: del d['id'] del d['author_id'] self.assertEqual( [sorted(d.items()) for d in dicts], [[('good', 0), ('rank', 2)], [('good', 0), ('rank', 1)], [('good', 1), ('rank', 3)]] ) def test_ticket7045(self): # Extra tables used to crash SQL construction on the second use. qs = Ranking.objects.extra(tables=['django_site']) qs.query.get_compiler(qs.db).as_sql() # test passes if this doesn't raise an exception. qs.query.get_compiler(qs.db).as_sql() def test_ticket9848(self): # Make sure that updates which only filter on sub-tables don't # inadvertently update the wrong records (bug #9848). author_start = Author.objects.get(name='a1') ranking_start = Ranking.objects.get(author__name='a1') # Make sure that the IDs from different tables don't happen to match. self.assertQuerysetEqual( Ranking.objects.filter(author__name='a1'), ['<Ranking: 3: a1>'] ) self.assertEqual( Ranking.objects.filter(author__name='a1').update(rank=4636), 1 ) r = Ranking.objects.get(author__name='a1') self.assertEqual(r.id, ranking_start.id) self.assertEqual(r.author.id, author_start.id) self.assertEqual(r.rank, 4636) r.rank = 3 r.save() self.assertQuerysetEqual( Ranking.objects.all(), ['<Ranking: 3: a1>', '<Ranking: 2: a2>', '<Ranking: 1: a3>'] ) def test_ticket5261(self): # Test different empty excludes. self.assertQuerysetEqual( Note.objects.exclude(Q()), ['<Note: n1>', '<Note: n2>'] ) self.assertQuerysetEqual( Note.objects.filter(~Q()), ['<Note: n1>', '<Note: n2>'] ) self.assertQuerysetEqual( Note.objects.filter(~Q() | ~Q()), ['<Note: n1>', '<Note: n2>'] ) self.assertQuerysetEqual( Note.objects.exclude(~Q() & ~Q()), ['<Note: n1>', '<Note: n2>'] ) def test_extra_select_literal_percent_s(self): # Allow %%s to escape select clauses self.assertEqual( Note.objects.extra(select={'foo': "'%%s'"})[0].foo, '%s' ) self.assertEqual( Note.objects.extra(select={'foo': "'%%s bar %%s'"})[0].foo, '%s bar %s' ) self.assertEqual( Note.objects.extra(select={'foo': "'bar %%s'"})[0].foo, 'bar %s' ) class SelectRelatedTests(TestCase): def test_tickets_3045_3288(self): # Once upon a time, select_related() with circular relations would loop # infinitely if you forgot to specify "depth". 
Now we set an arbitrary # default upper bound. self.assertQuerysetEqual(X.objects.all(), []) self.assertQuerysetEqual(X.objects.select_related(), []) class SubclassFKTests(TestCase): def test_ticket7778(self): # Model subclasses could not be deleted if a nullable foreign key # relates to a model that relates back. num_celebs = Celebrity.objects.count() tvc = TvChef.objects.create(name="Huey") self.assertEqual(Celebrity.objects.count(), num_celebs + 1) Fan.objects.create(fan_of=tvc) Fan.objects.create(fan_of=tvc) tvc.delete() # The parent object should have been deleted as well. self.assertEqual(Celebrity.objects.count(), num_celebs) class CustomPkTests(TestCase): def test_ticket7371(self): self.assertQuerysetEqual(Related.objects.order_by('custom'), []) class NullableRelOrderingTests(TestCase): def test_ticket10028(self): # Ordering by model related to nullable relations(!) should use outer # joins, so that all results are included. Plaything.objects.create(name="p1") self.assertQuerysetEqual( Plaything.objects.all(), ['<Plaything: p1>'] ) def test_join_already_in_query(self): # Ordering by model related to nullable relations should not change # the join type of already existing joins. Plaything.objects.create(name="p1") s = SingleObject.objects.create(name='s') r = RelatedObject.objects.create(single=s, f=1) Plaything.objects.create(name="p2", others=r) qs = Plaything.objects.all().filter(others__isnull=False).order_by('pk') self.assertNotIn('JOIN', str(qs.query)) qs = Plaything.objects.all().filter(others__f__isnull=False).order_by('pk') self.assertIn('INNER', str(qs.query)) qs = qs.order_by('others__single__name') # The ordering by others__single__name will add one new join (to single) # and that join must be a LEFT join. The already existing join to related # objects must be kept INNER. So, we have both an INNER and a LEFT join # in the query. self.assertEqual(str(qs.query).count('LEFT'), 1) self.assertEqual(str(qs.query).count('INNER'), 1) self.assertQuerysetEqual( qs, ['<Plaything: p2>'] ) class DisjunctiveFilterTests(TestCase): @classmethod def setUpTestData(cls): cls.n1 = Note.objects.create(note='n1', misc='foo', id=1) ExtraInfo.objects.create(info='e1', note=cls.n1) def test_ticket7872(self): # Another variation on the disjunctive filtering theme. # For the purposes of this regression test, it's important that there is no # Join object related to the LeafA we create. LeafA.objects.create(data='first') self.assertQuerysetEqual(LeafA.objects.all(), ['<LeafA: first>']) self.assertQuerysetEqual( LeafA.objects.filter(Q(data='first') | Q(join__b__data='second')), ['<LeafA: first>'] ) def test_ticket8283(self): # Checking that applying filters after a disjunction works correctly.
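# (The trailing .filter(note=self.n1) must constrain the ORed query as a # whole, not just the right-hand arm of the disjunction; both orderings of # the union below must therefore return the same single ExtraInfo.)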
self.assertQuerysetEqual( (ExtraInfo.objects.filter(note=self.n1) | ExtraInfo.objects.filter(info='e2')).filter(note=self.n1), ['<ExtraInfo: e1>'] ) self.assertQuerysetEqual( (ExtraInfo.objects.filter(info='e2') | ExtraInfo.objects.filter(note=self.n1)).filter(note=self.n1), ['<ExtraInfo: e1>'] ) class Queries6Tests(TestCase): @classmethod def setUpTestData(cls): generic = NamedCategory.objects.create(name="Generic") cls.t1 = Tag.objects.create(name='t1', category=generic) cls.t2 = Tag.objects.create(name='t2', parent=cls.t1, category=generic) cls.t3 = Tag.objects.create(name='t3', parent=cls.t1) cls.t4 = Tag.objects.create(name='t4', parent=cls.t3) cls.t5 = Tag.objects.create(name='t5', parent=cls.t3) n1 = Note.objects.create(note='n1', misc='foo', id=1) ann1 = Annotation.objects.create(name='a1', tag=cls.t1) ann1.notes.add(n1) Annotation.objects.create(name='a2', tag=cls.t4) def test_parallel_iterators(self): # Parallel iterators work. qs = Tag.objects.all() i1, i2 = iter(qs), iter(qs) self.assertEqual(repr(next(i1)), '<Tag: t1>') self.assertEqual(repr(next(i1)), '<Tag: t2>') self.assertEqual(repr(next(i2)), '<Tag: t1>') self.assertEqual(repr(next(i2)), '<Tag: t2>') self.assertEqual(repr(next(i2)), '<Tag: t3>') self.assertEqual(repr(next(i1)), '<Tag: t3>') qs = X.objects.all() self.assertFalse(qs) self.assertFalse(qs) def test_nested_queries_sql(self): # Nested queries should not evaluate the inner query as part of constructing the # SQL (so we should see a nested query here, indicated by two "SELECT" calls). qs = Annotation.objects.filter(notes__in=Note.objects.filter(note="xyzzy")) self.assertEqual( qs.query.get_compiler(qs.db).as_sql()[0].count('SELECT'), 2 ) def test_tickets_8921_9188(self): # Incorrect SQL was being generated for certain types of exclude() # queries that crossed multi-valued relations (#8921, #9188 and some # preemptively discovered cases). self.assertQuerysetEqual( PointerA.objects.filter(connection__pointerb__id=1), [] ) self.assertQuerysetEqual( PointerA.objects.exclude(connection__pointerb__id=1), [] ) self.assertQuerysetEqual( Tag.objects.exclude(children=None), ['<Tag: t1>', '<Tag: t3>'] ) # This example is tricky because the parent could be NULL, so only checking # parents with annotations omits some results (tag t1, in this case). self.assertQuerysetEqual( Tag.objects.exclude(parent__annotation__name="a1"), ['<Tag: t1>', '<Tag: t4>', '<Tag: t5>'] ) # The annotation->tag link is single-valued and the tag->children link is # multi-valued. So we have to split the exclude filter in the middle # and then optimize the inner query without losing results. self.assertQuerysetEqual( Annotation.objects.exclude(tag__children__name="t2"), ['<Annotation: a2>'] ) # Nested queries are possible (although they should be used with care, since # they have performance problems on backends like MySQL). self.assertQuerysetEqual( Annotation.objects.filter(notes__in=Note.objects.filter(note="n1")), ['<Annotation: a1>'] ) def test_ticket3739(self): # The all() method on querysets returns a copy of the queryset.
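# (So a chain such as q1.all().filter(...) never mutates q1 itself; every # queryset method is expected to operate on a clone.)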
q1 = Tag.objects.order_by('name') self.assertIsNot(q1, q1.all()) def test_ticket_11320(self): qs = Tag.objects.exclude(category=None).exclude(category__name='foo') self.assertEqual(str(qs.query).count(' INNER JOIN '), 1) def test_distinct_ordered_sliced_subquery_aggregation(self): self.assertEqual(Tag.objects.distinct().order_by('category__name')[:3].count(), 3) def test_multiple_columns_with_the_same_name_slice(self): self.assertEqual( list(Tag.objects.order_by('name').values_list('name', 'category__name')[:2]), [('t1', 'Generic'), ('t2', 'Generic')], ) self.assertSequenceEqual( Tag.objects.order_by('name').select_related('category')[:2], [self.t1, self.t2], ) self.assertEqual( list(Tag.objects.order_by('-name').values_list('name', 'parent__name')[:2]), [('t5', 't3'), ('t4', 't3')], ) self.assertSequenceEqual( Tag.objects.order_by('-name').select_related('parent')[:2], [self.t5, self.t4], ) class RawQueriesTests(TestCase): def setUp(self): Note.objects.create(note='n1', misc='foo', id=1) def test_ticket14729(self): # Test representation of a raw query with one or more parameters passed as a list query = "SELECT * FROM queries_note WHERE note = %s" params = ['n1'] qs = Note.objects.raw(query, params=params) self.assertEqual(repr(qs), "<RawQuerySet: SELECT * FROM queries_note WHERE note = n1>") query = "SELECT * FROM queries_note WHERE note = %s and misc = %s" params = ['n1', 'foo'] qs = Note.objects.raw(query, params=params) self.assertEqual(repr(qs), "<RawQuerySet: SELECT * FROM queries_note WHERE note = n1 and misc = foo>") class GeneratorExpressionTests(SimpleTestCase): def test_ticket10432(self): # Using an empty iterator as the rvalue for an "__in" # lookup is legal. self.assertCountEqual(Note.objects.filter(pk__in=iter(())), []) class ComparisonTests(TestCase): def setUp(self): self.n1 = Note.objects.create(note='n1', misc='foo', id=1) e1 = ExtraInfo.objects.create(info='e1', note=self.n1) self.a2 = Author.objects.create(name='a2', num=2002, extra=e1) def test_ticket8597(self): # Regression tests for case-insensitive comparisons Item.objects.create(name="a_b", created=datetime.datetime.now(), creator=self.a2, note=self.n1) Item.objects.create(name="x%y", created=datetime.datetime.now(), creator=self.a2, note=self.n1) self.assertQuerysetEqual( Item.objects.filter(name__iexact="A_b"), ['<Item: a_b>'] ) self.assertQuerysetEqual( Item.objects.filter(name__iexact="x%Y"), ['<Item: x%y>'] ) self.assertQuerysetEqual( Item.objects.filter(name__istartswith="A_b"), ['<Item: a_b>'] ) self.assertQuerysetEqual( Item.objects.filter(name__iendswith="A_b"), ['<Item: a_b>'] ) class ExistsSql(TestCase): def test_exists(self): with CaptureQueriesContext(connection) as captured_queries: self.assertFalse(Tag.objects.exists()) # Ok - so the exists query worked - but did it include too many columns?
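# (exists() should clear the select list and fetch a constant instead, # producing something like SELECT (1) AS "a" FROM queries_tag LIMIT 1; a # sketch, the exact SQL is backend-dependent, but no real column names # belong in it.)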
self.assertEqual(len(captured_queries), 1) qstr = captured_queries[0]['sql'] id, name = connection.ops.quote_name('id'), connection.ops.quote_name('name') self.assertNotIn(id, qstr) self.assertNotIn(name, qstr) def test_ticket_18414(self): Article.objects.create(name='one', created=datetime.datetime.now()) Article.objects.create(name='one', created=datetime.datetime.now()) Article.objects.create(name='two', created=datetime.datetime.now()) self.assertTrue(Article.objects.exists()) self.assertTrue(Article.objects.distinct().exists()) self.assertTrue(Article.objects.distinct()[1:3].exists()) self.assertFalse(Article.objects.distinct()[1:1].exists()) @skipUnlessDBFeature('can_distinct_on_fields') def test_ticket_18414_distinct_on(self): Article.objects.create(name='one', created=datetime.datetime.now()) Article.objects.create(name='one', created=datetime.datetime.now()) Article.objects.create(name='two', created=datetime.datetime.now()) self.assertTrue(Article.objects.distinct('name').exists()) self.assertTrue(Article.objects.distinct('name')[1:2].exists()) self.assertFalse(Article.objects.distinct('name')[2:3].exists()) class QuerysetOrderedTests(unittest.TestCase): """ Tests for the Queryset.ordered attribute. """ def test_no_default_or_explicit_ordering(self): self.assertIs(Annotation.objects.all().ordered, False) def test_cleared_default_ordering(self): self.assertIs(Tag.objects.all().ordered, True) self.assertIs(Tag.objects.all().order_by().ordered, False) def test_explicit_ordering(self): self.assertIs(Annotation.objects.all().order_by('id').ordered, True) def test_empty_queryset(self): self.assertIs(Annotation.objects.none().ordered, True) def test_order_by_extra(self): self.assertIs(Annotation.objects.all().extra(order_by=['id']).ordered, True) def test_annotated_ordering(self): qs = Annotation.objects.annotate(num_notes=Count('notes')) self.assertIs(qs.ordered, False) self.assertIs(qs.order_by('num_notes').ordered, True) @skipUnlessDBFeature('allow_sliced_subqueries_with_in') class SubqueryTests(TestCase): @classmethod def setUpTestData(cls): NamedCategory.objects.create(id=1, name='first') NamedCategory.objects.create(id=2, name='second') NamedCategory.objects.create(id=3, name='third') NamedCategory.objects.create(id=4, name='fourth') def test_ordered_subselect(self): "Subselects honor any manual ordering" query = DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[0:2]) self.assertEqual(set(query.values_list('id', flat=True)), {3, 4}) query = DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[:2]) self.assertEqual(set(query.values_list('id', flat=True)), {3, 4}) query = DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[1:2]) self.assertEqual(set(query.values_list('id', flat=True)), {3}) query = DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[2:]) self.assertEqual(set(query.values_list('id', flat=True)), {1, 2}) def test_slice_subquery_and_query(self): """ Slice a query that has a sliced subquery """ query = DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[0:2])[0:2] self.assertEqual({x.id for x in query}, {3, 4}) query = DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[1:3])[1:3] self.assertEqual({x.id for x in query}, {3}) query = DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[2:])[1:] self.assertEqual({x.id for x in query}, {2}) def test_related_sliced_subquery(self): """ Related objects constraints can safely contain 
sliced subqueries. refs #22434 """ generic = NamedCategory.objects.create(id=5, name="Generic") t1 = Tag.objects.create(name='t1', category=generic) t2 = Tag.objects.create(name='t2', category=generic) ManagedModel.objects.create(data='mm1', tag=t1, public=True) mm2 = ManagedModel.objects.create(data='mm2', tag=t2, public=True) query = ManagedModel.normal_manager.filter( tag__in=Tag.objects.order_by('-id')[:1] ) self.assertEqual({x.id for x in query}, {mm2.id}) def test_sliced_delete(self): "Delete queries can safely contain sliced subqueries" DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[0:1]).delete() self.assertEqual(set(DumbCategory.objects.values_list('id', flat=True)), {1, 2, 3}) DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[1:2]).delete() self.assertEqual(set(DumbCategory.objects.values_list('id', flat=True)), {1, 3}) DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[1:]).delete() self.assertEqual(set(DumbCategory.objects.values_list('id', flat=True)), {3}) def test_distinct_ordered_sliced_subquery(self): # Implicit values('id'). self.assertSequenceEqual( NamedCategory.objects.filter( id__in=NamedCategory.objects.distinct().order_by('name')[0:2], ).order_by('name').values_list('name', flat=True), ['first', 'fourth'] ) # Explicit values('id'). self.assertSequenceEqual( NamedCategory.objects.filter( id__in=NamedCategory.objects.distinct().order_by('-name').values('id')[0:2], ).order_by('name').values_list('name', flat=True), ['second', 'third'] ) # Annotated value. self.assertSequenceEqual( DumbCategory.objects.filter( id__in=DumbCategory.objects.annotate( double_id=F('id') * 2 ).order_by('id').distinct().values('double_id')[0:2], ).order_by('id').values_list('id', flat=True), [2, 4] ) @skipUnlessDBFeature('allow_sliced_subqueries_with_in') class QuerySetBitwiseOperationTests(TestCase): @classmethod def setUpTestData(cls): school = School.objects.create() cls.room_1 = Classroom.objects.create(school=school, has_blackboard=False, name='Room 1') cls.room_2 = Classroom.objects.create(school=school, has_blackboard=True, name='Room 2') cls.room_3 = Classroom.objects.create(school=school, has_blackboard=True, name='Room 3') cls.room_4 = Classroom.objects.create(school=school, has_blackboard=False, name='Room 4') def test_or_with_rhs_slice(self): qs1 = Classroom.objects.filter(has_blackboard=True) qs2 = Classroom.objects.filter(has_blackboard=False)[:1] self.assertCountEqual(qs1 | qs2, [self.room_1, self.room_2, self.room_3]) def test_or_with_lhs_slice(self): qs1 = Classroom.objects.filter(has_blackboard=True)[:1] qs2 = Classroom.objects.filter(has_blackboard=False) self.assertCountEqual(qs1 | qs2, [self.room_1, self.room_2, self.room_4]) def test_or_with_both_slice(self): qs1 = Classroom.objects.filter(has_blackboard=False)[:1] qs2 = Classroom.objects.filter(has_blackboard=True)[:1] self.assertCountEqual(qs1 | qs2, [self.room_1, self.room_2]) def test_or_with_both_slice_and_ordering(self): qs1 = Classroom.objects.filter(has_blackboard=False).order_by('-pk')[:1] qs2 = Classroom.objects.filter(has_blackboard=True).order_by('-name')[:1] self.assertCountEqual(qs1 | qs2, [self.room_3, self.room_4]) class CloneTests(TestCase): def test_evaluated_queryset_as_argument(self): "#13227 -- If a queryset is already evaluated, it can still be used as a query arg" n = Note(note='Test1', misc='misc') n.save() e = ExtraInfo(info='good', note=n) e.save() n_list = Note.objects.all() # Evaluate the Note queryset, populating the query 
cache list(n_list) # Use the note queryset in a query, and evaluate # that query in a way that involves cloning. self.assertEqual(ExtraInfo.objects.filter(note__in=n_list)[0].info, 'good') def test_no_model_options_cloning(self): """ Cloning a queryset does not get out of hand. While complete testing is impossible, this is a sanity check against invalid use of deepcopy. refs #16759. """ opts_class = type(Note._meta) note_deepcopy = getattr(opts_class, "__deepcopy__", None) opts_class.__deepcopy__ = lambda obj, memo: self.fail("Model options shouldn't be cloned.") try: Note.objects.filter(pk__lte=F('pk') + 1).all() finally: if note_deepcopy is None: delattr(opts_class, "__deepcopy__") else: opts_class.__deepcopy__ = note_deepcopy def test_no_fields_cloning(self): """ Cloning a queryset does not get out of hand. While complete testing is impossible, this is a sanity check against invalid use of deepcopy. refs #16759. """ opts_class = type(Note._meta.get_field("misc")) note_deepcopy = getattr(opts_class, "__deepcopy__", None) opts_class.__deepcopy__ = lambda obj, memo: self.fail("Model fields shouldn't be cloned") try: Note.objects.filter(note=F('misc')).all() finally: if note_deepcopy is None: delattr(opts_class, "__deepcopy__") else: opts_class.__deepcopy__ = note_deepcopy class EmptyQuerySetTests(SimpleTestCase): def test_emptyqueryset_values(self): # #14366 -- Calling .values() on an empty QuerySet and then cloning # that should not cause an error self.assertCountEqual(Number.objects.none().values('num').order_by('num'), []) def test_values_subquery(self): self.assertCountEqual(Number.objects.filter(pk__in=Number.objects.none().values('pk')), []) self.assertCountEqual(Number.objects.filter(pk__in=Number.objects.none().values_list('pk')), []) def test_ticket_19151(self): # #19151 -- Calling .values() or .values_list() on an empty QuerySet # should return an empty QuerySet and not cause an error. q = Author.objects.none() self.assertCountEqual(q.values(), []) self.assertCountEqual(q.values_list(), []) class ValuesQuerysetTests(TestCase): @classmethod def setUpTestData(cls): Number.objects.create(num=72) def test_flat_values_list(self): qs = Number.objects.values_list("num") qs = qs.values_list("num", flat=True) self.assertSequenceEqual(qs, [72]) def test_extra_values(self): # testing for ticket 14930 issues qs = Number.objects.extra(select={'value_plus_x': 'num+%s', 'value_minus_x': 'num-%s'}, select_params=(1, 2)) qs = qs.order_by('value_minus_x') qs = qs.values('num') self.assertSequenceEqual(qs, [{'num': 72}]) def test_extra_values_order_twice(self): # testing for ticket 14930 issues qs = Number.objects.extra(select={'value_plus_one': 'num+1', 'value_minus_one': 'num-1'}) qs = qs.order_by('value_minus_one').order_by('value_plus_one') qs = qs.values('num') self.assertSequenceEqual(qs, [{'num': 72}]) def test_extra_values_order_multiple(self): # Postgres doesn't allow constants in order by, so check for that. 
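# If the ORM mishandled the constant column when building the ORDER BY
# clause, evaluating the queryset below would raise an error on PostgreSQL,
# so the assertion doubles as a smoke test for the generated SQL.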
qs = Number.objects.extra(select={ 'value_plus_one': 'num+1', 'value_minus_one': 'num-1', 'constant_value': '1' }) qs = qs.order_by('value_plus_one', 'value_minus_one', 'constant_value') qs = qs.values('num') self.assertSequenceEqual(qs, [{'num': 72}]) def test_extra_values_order_in_extra(self): # testing for ticket 14930 issues qs = Number.objects.extra( select={'value_plus_one': 'num+1', 'value_minus_one': 'num-1'}, order_by=['value_minus_one'], ) qs = qs.values('num') # Evaluating should succeed and the extra ordering column should not # leak into the values() result. self.assertSequenceEqual(qs, [{'num': 72}]) def test_extra_select_params_values_order_in_extra(self): # testing for ticket 23259 issues qs = Number.objects.extra( select={'value_plus_x': 'num+%s'}, select_params=[1], order_by=['value_plus_x'], ) qs = qs.filter(num=72) qs = qs.values('num') self.assertSequenceEqual(qs, [{'num': 72}]) def test_extra_multiple_select_params_values_order_by(self): # testing for ticket 23259 issues qs = Number.objects.extra(select={'value_plus_x': 'num+%s', 'value_minus_x': 'num-%s'}, select_params=(72, 72)) qs = qs.order_by('value_minus_x') qs = qs.filter(num=1) qs = qs.values('num') self.assertSequenceEqual(qs, []) def test_extra_values_list(self): # testing for ticket 14930 issues qs = Number.objects.extra(select={'value_plus_one': 'num+1'}) qs = qs.order_by('value_plus_one') qs = qs.values_list('num') self.assertSequenceEqual(qs, [(72,)]) def test_flat_extra_values_list(self): # testing for ticket 14930 issues qs = Number.objects.extra(select={'value_plus_one': 'num+1'}) qs = qs.order_by('value_plus_one') qs = qs.values_list('num', flat=True) self.assertSequenceEqual(qs, [72]) def test_field_error_values_list(self): # see #23443 msg = "Cannot resolve keyword %r into field. Join on 'name' not permitted." % 'foo' with self.assertRaisesMessage(FieldError, msg): Tag.objects.values_list('name__foo') def test_named_values_list_flat(self): msg = "'flat' and 'named' can't be used together."
with self.assertRaisesMessage(TypeError, msg): Number.objects.values_list('num', flat=True, named=True) def test_named_values_list_bad_field_name(self): msg = "Type names and field names must be valid identifiers: '1'" with self.assertRaisesMessage(ValueError, msg): Number.objects.extra(select={'1': 'num+1'}).values_list('1', named=True).first() def test_named_values_list_with_fields(self): qs = Number.objects.extra(select={'num2': 'num+1'}).annotate(Count('id')) values = qs.values_list('num', 'num2', named=True).first() self.assertEqual(type(values).__name__, 'Row') self.assertEqual(values._fields, ('num', 'num2')) self.assertEqual(values.num, 72) self.assertEqual(values.num2, 73) def test_named_values_list_without_fields(self): qs = Number.objects.extra(select={'num2': 'num+1'}).annotate(Count('id')) values = qs.values_list(named=True).first() self.assertEqual(type(values).__name__, 'Row') self.assertEqual(values._fields, ('num2', 'id', 'num', 'other_num', 'id__count')) self.assertEqual(values.num, 72) self.assertEqual(values.num2, 73) self.assertEqual(values.id__count, 1) def test_named_values_list_expression_with_default_alias(self): expr = Count('id') values = Number.objects.annotate(id__count1=expr).values_list(expr, 'id__count1', named=True).first() self.assertEqual(values._fields, ('id__count2', 'id__count1')) def test_named_values_list_expression(self): expr = F('num') + 1 qs = Number.objects.annotate(combinedexpression1=expr).values_list(expr, 'combinedexpression1', named=True) values = qs.first() self.assertEqual(values._fields, ('combinedexpression2', 'combinedexpression1')) class QuerySetSupportsPythonIdioms(TestCase): @classmethod def setUpTestData(cls): some_date = datetime.datetime(2014, 5, 16, 12, 1) for i in range(1, 8): Article.objects.create( name="Article {}".format(i), created=some_date) def get_ordered_articles(self): return Article.objects.all().order_by('name') def test_can_get_items_using_index_and_slice_notation(self): self.assertEqual(self.get_ordered_articles()[0].name, 'Article 1') self.assertQuerysetEqual( self.get_ordered_articles()[1:3], ["<Article: Article 2>", "<Article: Article 3>"] ) def test_slicing_with_steps_can_be_used(self): self.assertQuerysetEqual( self.get_ordered_articles()[::2], [ "<Article: Article 1>", "<Article: Article 3>", "<Article: Article 5>", "<Article: Article 7>" ] ) def test_slicing_without_step_is_lazy(self): with self.assertNumQueries(0): self.get_ordered_articles()[0:5] def test_slicing_with_steps_is_not_lazy(self): with self.assertNumQueries(1): self.get_ordered_articles()[0:5:3] def test_slicing_can_slice_again_after_slicing(self): self.assertQuerysetEqual( self.get_ordered_articles()[0:5][0:2], ["<Article: Article 1>", "<Article: Article 2>"] ) self.assertQuerysetEqual(self.get_ordered_articles()[0:5][4:], ["<Article: Article 5>"]) self.assertQuerysetEqual(self.get_ordered_articles()[0:5][5:], []) # Some more tests! self.assertQuerysetEqual( self.get_ordered_articles()[2:][0:2], ["<Article: Article 3>", "<Article: Article 4>"] ) self.assertQuerysetEqual( self.get_ordered_articles()[2:][:2], ["<Article: Article 3>", "<Article: Article 4>"] ) self.assertQuerysetEqual(self.get_ordered_articles()[2:][2:3], ["<Article: Article 5>"]) # Using an offset without a limit is also possible.
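# [5:] should compile to an OFFSET clause with no LIMIT, returning every
# article after the fifth one.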
self.assertQuerysetEqual( self.get_ordered_articles()[5:], ["<Article: Article 6>", "<Article: Article 7>"] ) def test_slicing_cannot_filter_queryset_once_sliced(self): with self.assertRaisesMessage(AssertionError, "Cannot filter a query once a slice has been taken."): Article.objects.all()[0:5].filter(id=1) def test_slicing_cannot_reorder_queryset_once_sliced(self): with self.assertRaisesMessage(AssertionError, "Cannot reorder a query once a slice has been taken."): Article.objects.all()[0:5].order_by('id') def test_slicing_cannot_combine_queries_once_sliced(self): with self.assertRaisesMessage(AssertionError, "Cannot combine queries once a slice has been taken."): Article.objects.all()[0:1] & Article.objects.all()[4:5] def test_slicing_negative_indexing_not_supported_for_single_element(self): """hint: inverting your ordering might do what you need""" with self.assertRaisesMessage(AssertionError, "Negative indexing is not supported."): Article.objects.all()[-1] def test_slicing_negative_indexing_not_supported_for_range(self): """hint: inverting your ordering might do what you need""" with self.assertRaisesMessage(AssertionError, "Negative indexing is not supported."): Article.objects.all()[0:-5] def test_can_get_number_of_items_in_queryset_using_standard_len(self): self.assertEqual(len(Article.objects.filter(name__exact='Article 1')), 1) def test_can_combine_queries_using_and_and_or_operators(self): s1 = Article.objects.filter(name__exact='Article 1') s2 = Article.objects.filter(name__exact='Article 2') self.assertQuerysetEqual( (s1 | s2).order_by('name'), ["<Article: Article 1>", "<Article: Article 2>"] ) self.assertQuerysetEqual(s1 & s2, []) class WeirdQuerysetSlicingTests(TestCase): @classmethod def setUpTestData(cls): Number.objects.create(num=1) Number.objects.create(num=2) Article.objects.create(name='one', created=datetime.datetime.now()) Article.objects.create(name='two', created=datetime.datetime.now()) Article.objects.create(name='three', created=datetime.datetime.now()) Article.objects.create(name='four', created=datetime.datetime.now()) food = Food.objects.create(name='spam') Eaten.objects.create(meal='spam with eggs', food=food) def test_tickets_7698_10202(self): # People like to slice with '0' as the high-water mark. 
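# Zero-length slices can be proven empty without running any SQL; the
# assertNumQueries(0) checks further down depend on that short-circuit.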
self.assertQuerysetEqual(Article.objects.all()[0:0], []) self.assertQuerysetEqual(Article.objects.all()[0:0][:10], []) self.assertEqual(Article.objects.all()[:0].count(), 0) with self.assertRaisesMessage(TypeError, 'Cannot reverse a query once a slice has been taken.'): Article.objects.all()[:0].latest('created') def test_empty_resultset_sql(self): # ticket #12192 self.assertNumQueries(0, lambda: list(Number.objects.all()[1:1])) def test_empty_sliced_subquery(self): self.assertEqual(Eaten.objects.filter(food__in=Food.objects.all()[0:0]).count(), 0) def test_empty_sliced_subquery_exclude(self): self.assertEqual(Eaten.objects.exclude(food__in=Food.objects.all()[0:0]).count(), 1) def test_zero_length_values_slicing(self): n = 42 with self.assertNumQueries(0): self.assertQuerysetEqual(Article.objects.values()[n:n], []) self.assertQuerysetEqual(Article.objects.values_list()[n:n], []) class EscapingTests(TestCase): def test_ticket_7302(self): # Reserved names are appropriately escaped ReservedName.objects.create(name='a', order=42) ReservedName.objects.create(name='b', order=37) self.assertQuerysetEqual( ReservedName.objects.all().order_by('order'), ['<ReservedName: b>', '<ReservedName: a>'] ) self.assertQuerysetEqual( ReservedName.objects.extra(select={'stuff': 'name'}, order_by=('order', 'stuff')), ['<ReservedName: b>', '<ReservedName: a>'] ) class ToFieldTests(TestCase): def test_in_query(self): apple = Food.objects.create(name="apple") pear = Food.objects.create(name="pear") lunch = Eaten.objects.create(food=apple, meal="lunch") dinner = Eaten.objects.create(food=pear, meal="dinner") self.assertEqual( set(Eaten.objects.filter(food__in=[apple, pear])), {lunch, dinner}, ) def test_in_subquery(self): apple = Food.objects.create(name="apple") lunch = Eaten.objects.create(food=apple, meal="lunch") self.assertEqual( set(Eaten.objects.filter(food__in=Food.objects.filter(name='apple'))), {lunch} ) self.assertEqual( set(Eaten.objects.filter(food__in=Food.objects.filter(name='apple').values('eaten__meal'))), set() ) self.assertEqual( set(Food.objects.filter(eaten__in=Eaten.objects.filter(meal='lunch'))), {apple} ) def test_nested_in_subquery(self): extra = ExtraInfo.objects.create() author = Author.objects.create(num=42, extra=extra) report = Report.objects.create(creator=author) comment = ReportComment.objects.create(report=report) comments = ReportComment.objects.filter( report__in=Report.objects.filter( creator__in=extra.author_set.all(), ), ) self.assertSequenceEqual(comments, [comment]) def test_reverse_in(self): apple = Food.objects.create(name="apple") pear = Food.objects.create(name="pear") lunch_apple = Eaten.objects.create(food=apple, meal="lunch") dinner_pear = Eaten.objects.create(food=pear, meal="dinner") self.assertEqual( set(Food.objects.filter(eaten__in=[lunch_apple, dinner_pear])), {apple, pear} ) def test_single_object(self): apple = Food.objects.create(name="apple") lunch = Eaten.objects.create(food=apple, meal="lunch") dinner = Eaten.objects.create(food=apple, meal="dinner") self.assertEqual( set(Eaten.objects.filter(food=apple)), {lunch, dinner} ) def test_single_object_reverse(self): apple = Food.objects.create(name="apple") lunch = Eaten.objects.create(food=apple, meal="lunch") self.assertEqual( set(Food.objects.filter(eaten=lunch)), {apple} ) def test_recursive_fk(self): node1 = Node.objects.create(num=42) node2 = Node.objects.create(num=1, parent=node1) self.assertEqual( list(Node.objects.filter(parent=node1)), [node2] ) def test_recursive_fk_reverse(self): node1 =
Node.objects.create(num=42) node2 = Node.objects.create(num=1, parent=node1) self.assertEqual( list(Node.objects.filter(node=node2)), [node1] ) class IsNullTests(TestCase): def test_primary_key(self): custom = CustomPk.objects.create(name='pk') null = Related.objects.create() notnull = Related.objects.create(custom=custom) self.assertSequenceEqual(Related.objects.filter(custom__isnull=False), [notnull]) self.assertSequenceEqual(Related.objects.filter(custom__isnull=True), [null]) def test_to_field(self): apple = Food.objects.create(name="apple") Eaten.objects.create(food=apple, meal="lunch") Eaten.objects.create(meal="lunch") self.assertQuerysetEqual( Eaten.objects.filter(food__isnull=False), ['<Eaten: apple at lunch>'] ) self.assertQuerysetEqual( Eaten.objects.filter(food__isnull=True), ['<Eaten: None at lunch>'] ) class ConditionalTests(TestCase): """Tests whose execution depends on different environment conditions like Python version or DB backend features""" @classmethod def setUpTestData(cls): generic = NamedCategory.objects.create(name="Generic") t1 = Tag.objects.create(name='t1', category=generic) Tag.objects.create(name='t2', parent=t1, category=generic) t3 = Tag.objects.create(name='t3', parent=t1) Tag.objects.create(name='t4', parent=t3) Tag.objects.create(name='t5', parent=t3) def test_infinite_loop(self): # If you're not careful, it's possible to introduce infinite loops via # default ordering on foreign keys in a cycle. We detect that. with self.assertRaisesMessage(FieldError, 'Infinite loop caused by ordering.'): list(LoopX.objects.all()) # Force queryset evaluation with list() with self.assertRaisesMessage(FieldError, 'Infinite loop caused by ordering.'): list(LoopZ.objects.all()) # Force queryset evaluation with list() # Note that this doesn't cause an infinite loop, since the default # ordering on the Tag model is empty (and thus defaults to using "id" # for the related field). self.assertEqual(len(Tag.objects.order_by('parent')), 5) # ... but you can still order in a non-recursive fashion among linked # fields (the previous test failed because the default ordering was # recursive). self.assertQuerysetEqual( LoopX.objects.all().order_by('y__x__y__x__id'), [] ) # When grouping without specifying ordering, we add an explicit "ORDER BY NULL" # portion in MySQL to prevent unnecessary sorting. @skipUnlessDBFeature('requires_explicit_null_ordering_when_grouping') def test_null_ordering_added(self): query = Tag.objects.values_list('parent_id', flat=True).order_by().query query.group_by = ['parent_id'] sql = query.get_compiler(DEFAULT_DB_ALIAS).as_sql()[0] fragment = "ORDER BY " pos = sql.find(fragment) self.assertEqual(sql.find(fragment, pos + 1), -1) self.assertEqual(sql.find("NULL", pos + len(fragment)), pos + len(fragment)) def test_in_list_limit(self): # The "in" lookup works with lists of 1000 items or more. # The number of values is picked to force three different IN batches # for Oracle, while staying below MSSQL's 2100 parameter limit. numbers = list(range(2050)) max_query_params = connection.features.max_query_params if max_query_params is None or max_query_params >= len(numbers): Number.objects.bulk_create(Number(num=num) for num in numbers) for number in [1000, 1001, 2000, len(numbers)]: with self.subTest(number=number): self.assertEqual(Number.objects.filter(num__in=numbers[:number]).count(), number) class UnionTests(TestCase): """ Tests for the union of two querysets. Bug #12252.
""" @classmethod def setUpTestData(cls): objectas = [] objectbs = [] objectcs = [] a_info = ['one', 'two', 'three'] for name in a_info: o = ObjectA(name=name) o.save() objectas.append(o) b_info = [('un', 1, objectas[0]), ('deux', 2, objectas[0]), ('trois', 3, objectas[2])] for name, number, objecta in b_info: o = ObjectB(name=name, num=number, objecta=objecta) o.save() objectbs.append(o) c_info = [('ein', objectas[2], objectbs[2]), ('zwei', objectas[1], objectbs[1])] for name, objecta, objectb in c_info: o = ObjectC(name=name, objecta=objecta, objectb=objectb) o.save() objectcs.append(o) def check_union(self, model, Q1, Q2): filter = model.objects.filter self.assertEqual(set(filter(Q1) | filter(Q2)), set(filter(Q1 | Q2))) self.assertEqual(set(filter(Q2) | filter(Q1)), set(filter(Q1 | Q2))) def test_A_AB(self): Q1 = Q(name='two') Q2 = Q(objectb__name='deux') self.check_union(ObjectA, Q1, Q2) def test_A_AB2(self): Q1 = Q(name='two') Q2 = Q(objectb__name='deux', objectb__num=2) self.check_union(ObjectA, Q1, Q2) def test_AB_ACB(self): Q1 = Q(objectb__name='deux') Q2 = Q(objectc__objectb__name='deux') self.check_union(ObjectA, Q1, Q2) def test_BAB_BAC(self): Q1 = Q(objecta__objectb__name='deux') Q2 = Q(objecta__objectc__name='ein') self.check_union(ObjectB, Q1, Q2) def test_BAB_BACB(self): Q1 = Q(objecta__objectb__name='deux') Q2 = Q(objecta__objectc__objectb__name='trois') self.check_union(ObjectB, Q1, Q2) def test_BA_BCA__BAB_BAC_BCA(self): Q1 = Q(objecta__name='one', objectc__objecta__name='two') Q2 = Q(objecta__objectc__name='ein', objectc__objecta__name='three', objecta__objectb__name='trois') self.check_union(ObjectB, Q1, Q2) class DefaultValuesInsertTest(TestCase): def test_no_extra_params(self): """ Can create an instance of a model with only the PK field (#17056)." 
""" DumbCategory.objects.create() class ExcludeTests(TestCase): @classmethod def setUpTestData(cls): f1 = Food.objects.create(name='apples') Food.objects.create(name='oranges') Eaten.objects.create(food=f1, meal='dinner') j1 = Job.objects.create(name='Manager') r1 = Responsibility.objects.create(description='Playing golf') j2 = Job.objects.create(name='Programmer') r2 = Responsibility.objects.create(description='Programming') JobResponsibilities.objects.create(job=j1, responsibility=r1) JobResponsibilities.objects.create(job=j2, responsibility=r2) def test_to_field(self): self.assertQuerysetEqual( Food.objects.exclude(eaten__meal='dinner'), ['<Food: oranges>']) self.assertQuerysetEqual( Job.objects.exclude(responsibilities__description='Playing golf'), ['<Job: Programmer>']) self.assertQuerysetEqual( Responsibility.objects.exclude(jobs__name='Manager'), ['<Responsibility: Programming>']) def test_ticket14511(self): alex = Person.objects.get_or_create(name='Alex')[0] jane = Person.objects.get_or_create(name='Jane')[0] oracle = Company.objects.get_or_create(name='Oracle')[0] google = Company.objects.get_or_create(name='Google')[0] microsoft = Company.objects.get_or_create(name='Microsoft')[0] intel = Company.objects.get_or_create(name='Intel')[0] def employ(employer, employee, title): Employment.objects.get_or_create(employee=employee, employer=employer, title=title) employ(oracle, alex, 'Engineer') employ(oracle, alex, 'Developer') employ(google, alex, 'Engineer') employ(google, alex, 'Manager') employ(microsoft, alex, 'Manager') employ(intel, alex, 'Manager') employ(microsoft, jane, 'Developer') employ(intel, jane, 'Manager') alex_tech_employers = alex.employers.filter( employment__title__in=('Engineer', 'Developer')).distinct().order_by('name') self.assertSequenceEqual(alex_tech_employers, [google, oracle]) alex_nontech_employers = alex.employers.exclude( employment__title__in=('Engineer', 'Developer')).distinct().order_by('name') self.assertSequenceEqual(alex_nontech_employers, [google, intel, microsoft]) def test_exclude_reverse_fk_field_ref(self): tag = Tag.objects.create() Note.objects.create(tag=tag, note='note') annotation = Annotation.objects.create(name='annotation', tag=tag) self.assertEqual(Annotation.objects.exclude(tag__note__note=F('name')).get(), annotation) def test_exclude_with_circular_fk_relation(self): self.assertEqual(ObjectB.objects.exclude(objecta__objectb__name=F('name')).count(), 0) class ExcludeTest17600(TestCase): """ Some regressiontests for ticket #17600. Some of these likely duplicate other existing tests. """ @classmethod def setUpTestData(cls): # Create a few Orders. 
cls.o1 = Order.objects.create(pk=1) cls.o2 = Order.objects.create(pk=2) cls.o3 = Order.objects.create(pk=3) # Create some OrderItems for the first order with homogeneous # status_id values cls.oi1 = OrderItem.objects.create(order=cls.o1, status=1) cls.oi2 = OrderItem.objects.create(order=cls.o1, status=1) cls.oi3 = OrderItem.objects.create(order=cls.o1, status=1) # Create some OrderItems for the second order with heterogeneous # status_id values cls.oi4 = OrderItem.objects.create(order=cls.o2, status=1) cls.oi5 = OrderItem.objects.create(order=cls.o2, status=2) cls.oi6 = OrderItem.objects.create(order=cls.o2, status=3) # Create some OrderItems for the third order with heterogeneous # status_id values cls.oi7 = OrderItem.objects.create(order=cls.o3, status=2) cls.oi8 = OrderItem.objects.create(order=cls.o3, status=3) cls.oi9 = OrderItem.objects.create(order=cls.o3, status=4) def test_exclude_plain(self): """ This should exclude Orders which have some items with status 1 """ self.assertQuerysetEqual( Order.objects.exclude(items__status=1), ['<Order: 3>']) def test_exclude_plain_distinct(self): """ This should exclude Orders which have some items with status 1 """ self.assertQuerysetEqual( Order.objects.exclude(items__status=1).distinct(), ['<Order: 3>']) def test_exclude_with_q_object_distinct(self): """ This should exclude Orders which have some items with status 1 """ self.assertQuerysetEqual( Order.objects.exclude(Q(items__status=1)).distinct(), ['<Order: 3>']) def test_exclude_with_q_object_no_distinct(self): """ This should exclude Orders which have some items with status 1 """ self.assertQuerysetEqual( Order.objects.exclude(Q(items__status=1)), ['<Order: 3>']) def test_exclude_with_q_is_equal_to_plain_exclude(self): """ Using exclude(condition) and exclude(Q(condition)) should yield the same QuerySet """ self.assertEqual( list(Order.objects.exclude(items__status=1).distinct()), list(Order.objects.exclude(Q(items__status=1)).distinct())) def test_exclude_with_q_is_equal_to_plain_exclude_variation(self): """ Using exclude(condition) and exclude(Q(condition)) should yield the same QuerySet """ self.assertEqual( list(Order.objects.exclude(items__status=1)), list(Order.objects.exclude(Q(items__status=1)).distinct())) @unittest.expectedFailure def test_only_orders_with_all_items_having_status_1(self): """ This should only return orders having ALL items set to status 1, or those orders not having any items at all. The correct way to write this query in SQL seems to be using two nested subqueries.
""" self.assertQuerysetEqual( Order.objects.exclude(~Q(items__status=1)).distinct(), ['<Order: 1>']) class Exclude15786(TestCase): """Regression test for #15786""" def test_ticket15786(self): c1 = SimpleCategory.objects.create(name='c1') c2 = SimpleCategory.objects.create(name='c2') OneToOneCategory.objects.create(category=c1) OneToOneCategory.objects.create(category=c2) rel = CategoryRelationship.objects.create(first=c1, second=c2) self.assertEqual( CategoryRelationship.objects.exclude( first__onetoonecategory=F('second__onetoonecategory') ).get(), rel ) class NullInExcludeTest(TestCase): @classmethod def setUpTestData(cls): NullableName.objects.create(name='i1') NullableName.objects.create() def test_null_in_exclude_qs(self): none_val = '' if connection.features.interprets_empty_strings_as_nulls else None self.assertQuerysetEqual( NullableName.objects.exclude(name__in=[]), ['i1', none_val], attrgetter('name')) self.assertQuerysetEqual( NullableName.objects.exclude(name__in=['i1']), [none_val], attrgetter('name')) self.assertQuerysetEqual( NullableName.objects.exclude(name__in=['i3']), ['i1', none_val], attrgetter('name')) inner_qs = NullableName.objects.filter(name='i1').values_list('name') self.assertQuerysetEqual( NullableName.objects.exclude(name__in=inner_qs), [none_val], attrgetter('name')) # The inner queryset wasn't executed - it should be turned # into subquery above self.assertIs(inner_qs._result_cache, None) @unittest.expectedFailure def test_col_not_in_list_containing_null(self): """ The following case is not handled properly because SQL's COL NOT IN (list containing null) handling is too weird to abstract away. """ self.assertQuerysetEqual( NullableName.objects.exclude(name__in=[None]), ['i1'], attrgetter('name')) def test_double_exclude(self): self.assertEqual( list(NullableName.objects.filter(~~Q(name='i1'))), list(NullableName.objects.filter(Q(name='i1')))) self.assertNotIn( 'IS NOT NULL', str(NullableName.objects.filter(~~Q(name='i1')).query)) class EmptyStringsAsNullTest(TestCase): """ Filtering on non-null character fields works as expected. The reason for these tests is that Oracle treats '' as NULL, and this can cause problems in query construction. Refs #17957. """ @classmethod def setUpTestData(cls): cls.nc = NamedCategory.objects.create(name='') def test_direct_exclude(self): self.assertQuerysetEqual( NamedCategory.objects.exclude(name__in=['nonexistent']), [self.nc.pk], attrgetter('pk') ) def test_joined_exclude(self): self.assertQuerysetEqual( DumbCategory.objects.exclude(namedcategory__name__in=['nonexistent']), [self.nc.pk], attrgetter('pk') ) def test_21001(self): foo = NamedCategory.objects.create(name='foo') self.assertQuerysetEqual( NamedCategory.objects.exclude(name=''), [foo.pk], attrgetter('pk') ) class ProxyQueryCleanupTest(TestCase): def test_evaluated_proxy_count(self): """ Generating the query string doesn't alter the query's state in irreversible ways. Refs #18248. 
""" ProxyCategory.objects.create() qs = ProxyCategory.objects.all() self.assertEqual(qs.count(), 1) str(qs.query) self.assertEqual(qs.count(), 1) class WhereNodeTest(SimpleTestCase): class DummyNode: def as_sql(self, compiler, connection): return 'dummy', [] class MockCompiler: def compile(self, node): return node.as_sql(self, connection) def __call__(self, name): return connection.ops.quote_name(name) def test_empty_full_handling_conjunction(self): compiler = WhereNodeTest.MockCompiler() w = WhereNode(children=[NothingNode()]) with self.assertRaises(EmptyResultSet): w.as_sql(compiler, connection) w.negate() self.assertEqual(w.as_sql(compiler, connection), ('', [])) w = WhereNode(children=[self.DummyNode(), self.DummyNode()]) self.assertEqual(w.as_sql(compiler, connection), ('(dummy AND dummy)', [])) w.negate() self.assertEqual(w.as_sql(compiler, connection), ('NOT (dummy AND dummy)', [])) w = WhereNode(children=[NothingNode(), self.DummyNode()]) with self.assertRaises(EmptyResultSet): w.as_sql(compiler, connection) w.negate() self.assertEqual(w.as_sql(compiler, connection), ('', [])) def test_empty_full_handling_disjunction(self): compiler = WhereNodeTest.MockCompiler() w = WhereNode(children=[NothingNode()], connector='OR') with self.assertRaises(EmptyResultSet): w.as_sql(compiler, connection) w.negate() self.assertEqual(w.as_sql(compiler, connection), ('', [])) w = WhereNode(children=[self.DummyNode(), self.DummyNode()], connector='OR') self.assertEqual(w.as_sql(compiler, connection), ('(dummy OR dummy)', [])) w.negate() self.assertEqual(w.as_sql(compiler, connection), ('NOT (dummy OR dummy)', [])) w = WhereNode(children=[NothingNode(), self.DummyNode()], connector='OR') self.assertEqual(w.as_sql(compiler, connection), ('dummy', [])) w.negate() self.assertEqual(w.as_sql(compiler, connection), ('NOT (dummy)', [])) def test_empty_nodes(self): compiler = WhereNodeTest.MockCompiler() empty_w = WhereNode() w = WhereNode(children=[empty_w, empty_w]) self.assertEqual(w.as_sql(compiler, connection), ('', [])) w.negate() with self.assertRaises(EmptyResultSet): w.as_sql(compiler, connection) w.connector = 'OR' with self.assertRaises(EmptyResultSet): w.as_sql(compiler, connection) w.negate() self.assertEqual(w.as_sql(compiler, connection), ('', [])) w = WhereNode(children=[empty_w, NothingNode()], connector='OR') self.assertEqual(w.as_sql(compiler, connection), ('', [])) w = WhereNode(children=[empty_w, NothingNode()], connector='AND') with self.assertRaises(EmptyResultSet): w.as_sql(compiler, connection) class QuerySetExceptionTests(SimpleTestCase): def test_iter_exceptions(self): qs = ExtraInfo.objects.only('author') msg = "'ManyToOneRel' object has no attribute 'attname'" with self.assertRaisesMessage(AttributeError, msg): list(qs) def test_invalid_qs_list(self): # Test for #19895 - second iteration over invalid queryset # raises errors. qs = Article.objects.order_by('invalid_column') msg = "Cannot resolve keyword 'invalid_column' into field." with self.assertRaisesMessage(FieldError, msg): list(qs) with self.assertRaisesMessage(FieldError, msg): list(qs) def test_invalid_order_by(self): msg = "Invalid order_by arguments: ['*']" with self.assertRaisesMessage(FieldError, msg): list(Article.objects.order_by('*')) def test_invalid_queryset_model(self): msg = 'Cannot use QuerySet for "Article": Use a QuerySet for "ExtraInfo".' 
with self.assertRaisesMessage(ValueError, msg): list(Author.objects.filter(extra=Article.objects.all())) class NullJoinPromotionOrTest(TestCase): @classmethod def setUpTestData(cls): cls.d1 = ModelD.objects.create(name='foo') d2 = ModelD.objects.create(name='bar') cls.a1 = ModelA.objects.create(name='a1', d=cls.d1) c = ModelC.objects.create(name='c') b = ModelB.objects.create(name='b', c=c) cls.a2 = ModelA.objects.create(name='a2', b=b, d=d2) def test_ticket_17886(self): # The first Q-object is generating the match, the rest of the filters # should not remove the match even if they do not match anything. The # problem here was that b__name generates a LOUTER JOIN, then # b__c__name generates join to c, which the ORM tried to promote but # failed as that join isn't nullable. q_obj = ( Q(d__name='foo') | Q(b__name='foo') | Q(b__c__name='foo') ) qset = ModelA.objects.filter(q_obj) self.assertEqual(list(qset), [self.a1]) # We generate one INNER JOIN to D. The join is direct and not nullable # so we can use INNER JOIN for it. However, we can NOT use INNER JOIN # for the b->c join, as a->b is nullable. self.assertEqual(str(qset.query).count('INNER JOIN'), 1) def test_isnull_filter_promotion(self): qs = ModelA.objects.filter(Q(b__name__isnull=True)) self.assertEqual(str(qs.query).count('LEFT OUTER'), 1) self.assertEqual(list(qs), [self.a1]) qs = ModelA.objects.filter(~Q(b__name__isnull=True)) self.assertEqual(str(qs.query).count('INNER JOIN'), 1) self.assertEqual(list(qs), [self.a2]) qs = ModelA.objects.filter(~~Q(b__name__isnull=True)) self.assertEqual(str(qs.query).count('LEFT OUTER'), 1) self.assertEqual(list(qs), [self.a1]) qs = ModelA.objects.filter(Q(b__name__isnull=False)) self.assertEqual(str(qs.query).count('INNER JOIN'), 1) self.assertEqual(list(qs), [self.a2]) qs = ModelA.objects.filter(~Q(b__name__isnull=False)) self.assertEqual(str(qs.query).count('LEFT OUTER'), 1) self.assertEqual(list(qs), [self.a1]) qs = ModelA.objects.filter(~~Q(b__name__isnull=False)) self.assertEqual(str(qs.query).count('INNER JOIN'), 1) self.assertEqual(list(qs), [self.a2]) def test_null_join_demotion(self): qs = ModelA.objects.filter(Q(b__name__isnull=False) & Q(b__name__isnull=True)) self.assertIn(' INNER JOIN ', str(qs.query)) qs = ModelA.objects.filter(Q(b__name__isnull=True) & Q(b__name__isnull=False)) self.assertIn(' INNER JOIN ', str(qs.query)) qs = ModelA.objects.filter(Q(b__name__isnull=False) | Q(b__name__isnull=True)) self.assertIn(' LEFT OUTER JOIN ', str(qs.query)) qs = ModelA.objects.filter(Q(b__name__isnull=True) | Q(b__name__isnull=False)) self.assertIn(' LEFT OUTER JOIN ', str(qs.query)) def test_ticket_21366(self): n = Note.objects.create(note='n', misc='m') e = ExtraInfo.objects.create(info='info', note=n) a = Author.objects.create(name='Author1', num=1, extra=e) Ranking.objects.create(rank=1, author=a) r1 = Report.objects.create(name='Foo', creator=a) r2 = Report.objects.create(name='Bar') Report.objects.create(name='Bar', creator=a) qs = Report.objects.filter( Q(creator__ranking__isnull=True) | Q(creator__ranking__rank=1, name='Foo') ) self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 2) self.assertEqual(str(qs.query).count(' JOIN '), 2) self.assertSequenceEqual(qs.order_by('name'), [r2, r1]) def test_ticket_21748(self): i1 = Identifier.objects.create(name='i1') i2 = Identifier.objects.create(name='i2') i3 = Identifier.objects.create(name='i3') Program.objects.create(identifier=i1) Channel.objects.create(identifier=i1) Program.objects.create(identifier=i2) 
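# At this point i1 has both a program and a channel, i2 has only a
# program, and i3 has neither, so the isnull-style filters below need
# LEFT OUTER JOINs on both relations to match i3 alone.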
self.assertSequenceEqual(Identifier.objects.filter(program=None, channel=None), [i3]) self.assertSequenceEqual(Identifier.objects.exclude(program=None, channel=None).order_by('name'), [i1, i2]) def test_ticket_21748_double_negated_and(self): i1 = Identifier.objects.create(name='i1') i2 = Identifier.objects.create(name='i2') Identifier.objects.create(name='i3') p1 = Program.objects.create(identifier=i1) c1 = Channel.objects.create(identifier=i1) Program.objects.create(identifier=i2) # Check the ~~Q() (or equivalently .exclude(~Q)) works like Q() for # join promotion. qs1_doubleneg = Identifier.objects.exclude(~Q(program__id=p1.id, channel__id=c1.id)).order_by('pk') qs1_filter = Identifier.objects.filter(program__id=p1.id, channel__id=c1.id).order_by('pk') self.assertQuerysetEqual(qs1_doubleneg, qs1_filter, lambda x: x) self.assertEqual(str(qs1_filter.query).count('JOIN'), str(qs1_doubleneg.query).count('JOIN')) self.assertEqual(2, str(qs1_doubleneg.query).count('INNER JOIN')) self.assertEqual(str(qs1_filter.query).count('INNER JOIN'), str(qs1_doubleneg.query).count('INNER JOIN')) def test_ticket_21748_double_negated_or(self): i1 = Identifier.objects.create(name='i1') i2 = Identifier.objects.create(name='i2') Identifier.objects.create(name='i3') p1 = Program.objects.create(identifier=i1) c1 = Channel.objects.create(identifier=i1) p2 = Program.objects.create(identifier=i2) # Test OR + doubleneg. The expected result is that channel is LOUTER # joined, program INNER joined qs1_filter = Identifier.objects.filter( Q(program__id=p2.id, channel__id=c1.id) | Q(program__id=p1.id) ).order_by('pk') qs1_doubleneg = Identifier.objects.exclude( ~Q(Q(program__id=p2.id, channel__id=c1.id) | Q(program__id=p1.id)) ).order_by('pk') self.assertQuerysetEqual(qs1_doubleneg, qs1_filter, lambda x: x) self.assertEqual(str(qs1_filter.query).count('JOIN'), str(qs1_doubleneg.query).count('JOIN')) self.assertEqual(1, str(qs1_doubleneg.query).count('INNER JOIN')) self.assertEqual(str(qs1_filter.query).count('INNER JOIN'), str(qs1_doubleneg.query).count('INNER JOIN')) def test_ticket_21748_complex_filter(self): i1 = Identifier.objects.create(name='i1') i2 = Identifier.objects.create(name='i2') Identifier.objects.create(name='i3') p1 = Program.objects.create(identifier=i1) c1 = Channel.objects.create(identifier=i1) p2 = Program.objects.create(identifier=i2) # Finally, a more complex case, one time in a way where each # NOT is pushed to lowest level in the boolean tree, and # another query where this isn't done. qs1 = Identifier.objects.filter( ~Q(~Q(program__id=p2.id, channel__id=c1.id) & Q(program__id=p1.id)) ).order_by('pk') qs2 = Identifier.objects.filter( Q(Q(program__id=p2.id, channel__id=c1.id) | ~Q(program__id=p1.id)) ).order_by('pk') self.assertQuerysetEqual(qs1, qs2, lambda x: x) self.assertEqual(str(qs1.query).count('JOIN'), str(qs2.query).count('JOIN')) self.assertEqual(0, str(qs1.query).count('INNER JOIN')) self.assertEqual(str(qs1.query).count('INNER JOIN'), str(qs2.query).count('INNER JOIN')) class ReverseJoinTrimmingTest(TestCase): def test_reverse_trimming(self): # We don't accidentally trim reverse joins - we can't know if there is # anything on the other side of the join, so trimming reverse joins # can't be done, ever. t = Tag.objects.create() qs = Tag.objects.filter(annotation__tag=t.pk) self.assertIn('INNER JOIN', str(qs.query)) self.assertEqual(list(qs), []) class JoinReuseTest(TestCase): """ The queries reuse joins sensibly (for example, direct joins are always reused). 
""" def test_fk_reuse(self): qs = Annotation.objects.filter(tag__name='foo').filter(tag__name='bar') self.assertEqual(str(qs.query).count('JOIN'), 1) def test_fk_reuse_select_related(self): qs = Annotation.objects.filter(tag__name='foo').select_related('tag') self.assertEqual(str(qs.query).count('JOIN'), 1) def test_fk_reuse_annotation(self): qs = Annotation.objects.filter(tag__name='foo').annotate(cnt=Count('tag__name')) self.assertEqual(str(qs.query).count('JOIN'), 1) def test_fk_reuse_disjunction(self): qs = Annotation.objects.filter(Q(tag__name='foo') | Q(tag__name='bar')) self.assertEqual(str(qs.query).count('JOIN'), 1) def test_fk_reuse_order_by(self): qs = Annotation.objects.filter(tag__name='foo').order_by('tag__name') self.assertEqual(str(qs.query).count('JOIN'), 1) def test_revo2o_reuse(self): qs = Detail.objects.filter(member__name='foo').filter(member__name='foo') self.assertEqual(str(qs.query).count('JOIN'), 1) def test_revfk_noreuse(self): qs = Author.objects.filter(report__name='r4').filter(report__name='r1') self.assertEqual(str(qs.query).count('JOIN'), 2) def test_inverted_q_across_relations(self): """ When a trimmable join is specified in the query (here school__), the ORM detects it and removes unnecessary joins. The set of reusable joins are updated after trimming the query so that other lookups don't consider that the outer query's filters are in effect for the subquery (#26551). """ springfield_elementary = School.objects.create() hogward = School.objects.create() Student.objects.create(school=springfield_elementary) hp = Student.objects.create(school=hogward) Classroom.objects.create(school=hogward, name='Potion') Classroom.objects.create(school=springfield_elementary, name='Main') qs = Student.objects.filter( ~(Q(school__classroom__name='Main') & Q(school__classroom__has_blackboard=None)) ) self.assertSequenceEqual(qs, [hp]) class DisjunctionPromotionTests(TestCase): def test_disjunction_promotion_select_related(self): fk1 = FK1.objects.create(f1='f1', f2='f2') basea = BaseA.objects.create(a=fk1) qs = BaseA.objects.filter(Q(a=fk1) | Q(b=2)) self.assertEqual(str(qs.query).count(' JOIN '), 0) qs = qs.select_related('a', 'b') self.assertEqual(str(qs.query).count(' INNER JOIN '), 0) self.assertEqual(str(qs.query).count(' LEFT OUTER JOIN '), 2) with self.assertNumQueries(1): self.assertSequenceEqual(qs, [basea]) self.assertEqual(qs[0].a, fk1) self.assertIs(qs[0].b, None) def test_disjunction_promotion1(self): # Pre-existing join, add two ORed filters to the same join, # all joins can be INNER JOINS. qs = BaseA.objects.filter(a__f1='foo') self.assertEqual(str(qs.query).count('INNER JOIN'), 1) qs = qs.filter(Q(b__f1='foo') | Q(b__f2='foo')) self.assertEqual(str(qs.query).count('INNER JOIN'), 2) # Reverse the order of AND and OR filters. qs = BaseA.objects.filter(Q(b__f1='foo') | Q(b__f2='foo')) self.assertEqual(str(qs.query).count('INNER JOIN'), 1) qs = qs.filter(a__f1='foo') self.assertEqual(str(qs.query).count('INNER JOIN'), 2) def test_disjunction_promotion2(self): qs = BaseA.objects.filter(a__f1='foo') self.assertEqual(str(qs.query).count('INNER JOIN'), 1) # Now we have two different joins in an ORed condition, these # must be OUTER joins. The pre-existing join should remain INNER. qs = qs.filter(Q(b__f1='foo') | Q(c__f2='foo')) self.assertEqual(str(qs.query).count('INNER JOIN'), 1) self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 2) # Reverse case. 
qs = BaseA.objects.filter(Q(b__f1='foo') | Q(c__f2='foo')) self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 2) qs = qs.filter(a__f1='foo') self.assertEqual(str(qs.query).count('INNER JOIN'), 1) self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 2) def test_disjunction_promotion3(self): qs = BaseA.objects.filter(a__f2='bar') self.assertEqual(str(qs.query).count('INNER JOIN'), 1) # The ANDed a__f2 filter allows us to keep using INNER JOIN # even inside the ORed case. If the join to a__ returns nothing, # the ANDed filter for a__f2 can't be true. qs = qs.filter(Q(a__f1='foo') | Q(b__f2='foo')) self.assertEqual(str(qs.query).count('INNER JOIN'), 1) self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 1) def test_disjunction_promotion3_demote(self): # This one needs demotion logic: the first filter causes a to be # outer joined, the second filter makes it inner join again. qs = BaseA.objects.filter( Q(a__f1='foo') | Q(b__f2='foo')).filter(a__f2='bar') self.assertEqual(str(qs.query).count('INNER JOIN'), 1) self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 1) def test_disjunction_promotion4_demote(self): qs = BaseA.objects.filter(Q(a=1) | Q(a=2)) self.assertEqual(str(qs.query).count('JOIN'), 0) # Demote needed for the "a" join. It is marked as an outer join by # the above filter (even if it is trimmed away). qs = qs.filter(a__f1='foo') self.assertEqual(str(qs.query).count('INNER JOIN'), 1) def test_disjunction_promotion4(self): qs = BaseA.objects.filter(a__f1='foo') self.assertEqual(str(qs.query).count('INNER JOIN'), 1) qs = qs.filter(Q(a=1) | Q(a=2)) self.assertEqual(str(qs.query).count('INNER JOIN'), 1) def test_disjunction_promotion5_demote(self): qs = BaseA.objects.filter(Q(a=1) | Q(a=2)) # Note that the above filters on a force the join to an # inner join even if it is trimmed. self.assertEqual(str(qs.query).count('JOIN'), 0) qs = qs.filter(Q(a__f1='foo') | Q(b__f1='foo')) # So, now the a__f1 join doesn't need promotion. self.assertEqual(str(qs.query).count('INNER JOIN'), 1) # But b__f1 does.
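# (b__f1 appears only inside the disjunction, so no matching b row is
# guaranteed to exist and that join has to stay LEFT OUTER.)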
self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 1) qs = BaseA.objects.filter(Q(a__f1='foo') | Q(b__f1='foo')) # Now the join to a is created as LOUTER self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 2) qs = qs.filter(Q(a=1) | Q(a=2)) self.assertEqual(str(qs.query).count('INNER JOIN'), 1) self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 1) def test_disjunction_promotion6(self): qs = BaseA.objects.filter(Q(a=1) | Q(a=2)) self.assertEqual(str(qs.query).count('JOIN'), 0) qs = BaseA.objects.filter(Q(a__f1='foo') & Q(b__f1='foo')) self.assertEqual(str(qs.query).count('INNER JOIN'), 2) self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 0) qs = BaseA.objects.filter(Q(a__f1='foo') & Q(b__f1='foo')) self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 0) self.assertEqual(str(qs.query).count('INNER JOIN'), 2) qs = qs.filter(Q(a=1) | Q(a=2)) self.assertEqual(str(qs.query).count('INNER JOIN'), 2) self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 0) def test_disjunction_promotion7(self): qs = BaseA.objects.filter(Q(a=1) | Q(a=2)) self.assertEqual(str(qs.query).count('JOIN'), 0) qs = BaseA.objects.filter(Q(a__f1='foo') | (Q(b__f1='foo') & Q(a__f1='bar'))) self.assertEqual(str(qs.query).count('INNER JOIN'), 1) self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 1) qs = BaseA.objects.filter( (Q(a__f1='foo') | Q(b__f1='foo')) & (Q(a__f1='bar') | Q(c__f1='foo')) ) self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 3) self.assertEqual(str(qs.query).count('INNER JOIN'), 0) qs = BaseA.objects.filter( (Q(a__f1='foo') | (Q(a__f1='bar')) & (Q(b__f1='bar') | Q(c__f1='foo'))) ) self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 2) self.assertEqual(str(qs.query).count('INNER JOIN'), 1) def test_disjunction_promotion_fexpression(self): qs = BaseA.objects.filter(Q(a__f1=F('b__f1')) | Q(b__f1='foo')) self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 1) self.assertEqual(str(qs.query).count('INNER JOIN'), 1) qs = BaseA.objects.filter(Q(a__f1=F('c__f1')) | Q(b__f1='foo')) self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 3) qs = BaseA.objects.filter(Q(a__f1=F('b__f1')) | Q(a__f2=F('b__f2')) | Q(c__f1='foo')) self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 3) qs = BaseA.objects.filter(Q(a__f1=F('c__f1')) | (Q(pk=1) & Q(pk=2))) self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 2) self.assertEqual(str(qs.query).count('INNER JOIN'), 0) class ManyToManyExcludeTest(TestCase): def test_exclude_many_to_many(self): Identifier.objects.create(name='extra') program = Program.objects.create(identifier=Identifier.objects.create(name='program')) channel = Channel.objects.create(identifier=Identifier.objects.create(name='channel')) channel.programs.add(program) # channel contains 'program', so all Identifiers except that one # should be returned self.assertQuerysetEqual( Identifier.objects.exclude(program__channel=channel).order_by('name'), ['<Identifier: channel>', '<Identifier: extra>'] ) self.assertQuerysetEqual( Identifier.objects.exclude(program__channel=None).order_by('name'), ['<Identifier: program>'] ) def test_ticket_12823(self): pg3 = Page.objects.create(text='pg3') pg2 = Page.objects.create(text='pg2') pg1 = Page.objects.create(text='pg1') pa1 = Paragraph.objects.create(text='pa1') pa1.page.set([pg1, pg2]) pa2 = Paragraph.objects.create(text='pa2') pa2.page.set([pg2, pg3]) pa3 = Paragraph.objects.create(text='pa3') ch1 = Chapter.objects.create(title='ch1', paragraph=pa1) ch2 = Chapter.objects.create(title='ch2', paragraph=pa2) ch3 =
Chapter.objects.create(title='ch3', paragraph=pa3) b1 = Book.objects.create(title='b1', chapter=ch1) b2 = Book.objects.create(title='b2', chapter=ch2) b3 = Book.objects.create(title='b3', chapter=ch3) q = Book.objects.exclude(chapter__paragraph__page__text='pg1') self.assertNotIn('IS NOT NULL', str(q.query)) self.assertEqual(len(q), 2) self.assertNotIn(b1, q) self.assertIn(b2, q) self.assertIn(b3, q) class RelabelCloneTest(TestCase): def test_ticket_19964(self): my1 = MyObject.objects.create(data='foo') my1.parent = my1 my1.save() my2 = MyObject.objects.create(data='bar', parent=my1) parents = MyObject.objects.filter(parent=F('id')) children = MyObject.objects.filter(parent__in=parents).exclude(parent=F('id')) self.assertEqual(list(parents), [my1]) # Evaluating the children query (which has parents as part of it) does # not change results for the parents query. self.assertEqual(list(children), [my2]) self.assertEqual(list(parents), [my1]) class Ticket20101Tests(TestCase): def test_ticket_20101(self): """ Tests QuerySet ORed combining in exclude subquery case. """ t = Tag.objects.create(name='foo') a1 = Annotation.objects.create(tag=t, name='a1') a2 = Annotation.objects.create(tag=t, name='a2') a3 = Annotation.objects.create(tag=t, name='a3') n = Note.objects.create(note='foo', misc='bar') qs1 = Note.objects.exclude(annotation__in=[a1, a2]) qs2 = Note.objects.filter(annotation__in=[a3]) self.assertIn(n, qs1) self.assertNotIn(n, qs2) self.assertIn(n, (qs1 | qs2)) class EmptyStringPromotionTests(SimpleTestCase): def test_empty_string_promotion(self): qs = RelatedObject.objects.filter(single__name='') if connection.features.interprets_empty_strings_as_nulls: self.assertIn('LEFT OUTER JOIN', str(qs.query)) else: self.assertNotIn('LEFT OUTER JOIN', str(qs.query)) class ValuesSubqueryTests(TestCase): def test_values_in_subquery(self): # If a values() queryset is used, then the given values # will be used instead of forcing use of the relation's field. o1 = Order.objects.create(id=-2) o2 = Order.objects.create(id=-1) oi1 = OrderItem.objects.create(order=o1, status=0) oi1.status = oi1.pk oi1.save() OrderItem.objects.create(order=o2, status=0) # The query below should match o1 as it has related order_item # with id == status. 
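# Since the subquery selects values_list('status'), the __in comparison
# runs against the status column instead of OrderItem pks; only oi1, whose
# status was set equal to its own pk above, can match.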
self.assertSequenceEqual(Order.objects.filter(items__in=OrderItem.objects.values_list('status')), [o1]) class DoubleInSubqueryTests(TestCase): def test_double_subquery_in(self): lfa1 = LeafA.objects.create(data='foo') lfa2 = LeafA.objects.create(data='bar') lfb1 = LeafB.objects.create(data='lfb1') lfb2 = LeafB.objects.create(data='lfb2') Join.objects.create(a=lfa1, b=lfb1) Join.objects.create(a=lfa2, b=lfb2) leaf_as = LeafA.objects.filter(data='foo').values_list('pk', flat=True) joins = Join.objects.filter(a__in=leaf_as).values_list('b__id', flat=True) qs = LeafB.objects.filter(pk__in=joins) self.assertSequenceEqual(qs, [lfb1]) class Ticket18785Tests(SimpleTestCase): def test_ticket_18785(self): # Test join trimming from ticket18785 qs = Item.objects.exclude( note__isnull=False ).filter( name='something', creator__extra__isnull=True ).order_by() self.assertEqual(1, str(qs.query).count('INNER JOIN')) self.assertEqual(0, str(qs.query).count('OUTER JOIN')) class Ticket20788Tests(TestCase): def test_ticket_20788(self): Paragraph.objects.create() paragraph = Paragraph.objects.create() page = paragraph.page.create() chapter = Chapter.objects.create(paragraph=paragraph) Book.objects.create(chapter=chapter) paragraph2 = Paragraph.objects.create() Page.objects.create() chapter2 = Chapter.objects.create(paragraph=paragraph2) book2 = Book.objects.create(chapter=chapter2) sentences_not_in_pub = Book.objects.exclude(chapter__paragraph__page=page) self.assertSequenceEqual(sentences_not_in_pub, [book2]) class Ticket12807Tests(TestCase): def test_ticket_12807(self): p1 = Paragraph.objects.create() p2 = Paragraph.objects.create() # The ORed condition below should have no effect on the query - the # ~Q(pk__in=[]) will always be True. qs = Paragraph.objects.filter((Q(pk=p2.pk) | ~Q(pk__in=[])) & Q(pk=p1.pk)) self.assertSequenceEqual(qs, [p1]) class RelatedLookupTypeTests(TestCase): error = 'Cannot query "%s": Must be "%s" instance.' @classmethod def setUpTestData(cls): cls.oa = ObjectA.objects.create(name="oa") cls.poa = ProxyObjectA.objects.get(name="oa") cls.coa = ChildObjectA.objects.create(name="coa") cls.wrong_type = Order.objects.create(id=cls.oa.pk) cls.ob = ObjectB.objects.create(name="ob", objecta=cls.oa, num=1) ProxyObjectB.objects.create(name="pob", objecta=cls.oa, num=2) cls.pob = ProxyObjectB.objects.all() ObjectC.objects.create(childobjecta=cls.coa) def test_wrong_type_lookup(self): """ A ValueError is raised when the incorrect object type is passed to a query lookup. """ # Passing incorrect object type with self.assertRaisesMessage(ValueError, self.error % (self.wrong_type, ObjectA._meta.object_name)): ObjectB.objects.get(objecta=self.wrong_type) with self.assertRaisesMessage(ValueError, self.error % (self.wrong_type, ObjectA._meta.object_name)): ObjectB.objects.filter(objecta__in=[self.wrong_type]) with self.assertRaisesMessage(ValueError, self.error % (self.wrong_type, ObjectA._meta.object_name)): ObjectB.objects.filter(objecta=self.wrong_type) with self.assertRaisesMessage(ValueError, self.error % (self.wrong_type, ObjectB._meta.object_name)): ObjectA.objects.filter(objectb__in=[self.wrong_type, self.ob]) # Passing an object of the class on which query is done. 
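# self.ob is an ObjectB, so passing it where an ObjectA (or ChildObjectA)
# is expected should raise the same ValueError as any other wrong type.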
with self.assertRaisesMessage(ValueError, self.error % (self.ob, ObjectA._meta.object_name)): ObjectB.objects.filter(objecta__in=[self.poa, self.ob]) with self.assertRaisesMessage(ValueError, self.error % (self.ob, ChildObjectA._meta.object_name)): ObjectC.objects.exclude(childobjecta__in=[self.coa, self.ob]) def test_wrong_backward_lookup(self): """ A ValueError is raised when the incorrect object type is passed to a query lookup for backward relations. """ with self.assertRaisesMessage(ValueError, self.error % (self.oa, ObjectB._meta.object_name)): ObjectA.objects.filter(objectb__in=[self.oa, self.ob]) with self.assertRaisesMessage(ValueError, self.error % (self.oa, ObjectB._meta.object_name)): ObjectA.objects.exclude(objectb=self.oa) with self.assertRaisesMessage(ValueError, self.error % (self.wrong_type, ObjectB._meta.object_name)): ObjectA.objects.get(objectb=self.wrong_type) def test_correct_lookup(self): """ When passing proxy model objects, child objects, or parent objects, lookups work fine. """ out_a = ['<ObjectA: oa>'] out_b = ['<ObjectB: ob>', '<ObjectB: pob>'] out_c = ['<ObjectC: >'] # proxy model objects self.assertQuerysetEqual(ObjectB.objects.filter(objecta=self.poa).order_by('name'), out_b) self.assertQuerysetEqual(ObjectA.objects.filter(objectb__in=self.pob).order_by('pk'), out_a * 2) # child objects self.assertQuerysetEqual(ObjectB.objects.filter(objecta__in=[self.coa]), []) self.assertQuerysetEqual(ObjectB.objects.filter(objecta__in=[self.poa, self.coa]).order_by('name'), out_b) self.assertQuerysetEqual( ObjectB.objects.filter(objecta__in=iter([self.poa, self.coa])).order_by('name'), out_b ) # parent objects self.assertQuerysetEqual(ObjectC.objects.exclude(childobjecta=self.oa), out_c) # QuerySet related object type checking shouldn't issue queries # (the querysets aren't evaluated here, hence zero queries) (#23266). with self.assertNumQueries(0): ObjectB.objects.filter(objecta__in=ObjectA.objects.all()) def test_values_queryset_lookup(self): """ #23396 - Ensure ValueQuerySets are not checked for compatibility with the lookup field """ # Make sure the num and objecta field values match. ob = ObjectB.objects.get(name='ob') ob.num = ob.objecta.pk ob.save() pob = ObjectB.objects.get(name='pob') pob.num = pob.objecta.pk pob.save() self.assertQuerysetEqual(ObjectB.objects.filter( objecta__in=ObjectB.objects.all().values_list('num') ).order_by('pk'), ['<ObjectB: ob>', '<ObjectB: pob>']) class Ticket14056Tests(TestCase): def test_ticket_14056(self): s1 = SharedConnection.objects.create(data='s1') s2 = SharedConnection.objects.create(data='s2') s3 = SharedConnection.objects.create(data='s3') PointerA.objects.create(connection=s2) expected_ordering = ( [s1, s3, s2] if connection.features.nulls_order_largest else [s2, s1, s3] ) self.assertSequenceEqual(SharedConnection.objects.order_by('-pointera__connection', 'pk'), expected_ordering) class Ticket20955Tests(TestCase): def test_ticket_20955(self): jack = Staff.objects.create(name='jackstaff') jackstaff = StaffUser.objects.create(staff=jack) jill = Staff.objects.create(name='jillstaff') jillstaff = StaffUser.objects.create(staff=jill) task = Task.objects.create(creator=jackstaff, owner=jillstaff, title="task") task_get = Task.objects.get(pk=task.pk) # Load data so that assertNumQueries doesn't complain about the get # version's queries. 
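# Walking the chain caches the related objects on task_get, so the
# comparisons inside assertNumQueries(0) below are served from the cache.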
task_get.creator.staffuser.staff task_get.owner.staffuser.staff qs = Task.objects.select_related( 'creator__staffuser__staff', 'owner__staffuser__staff') self.assertEqual(str(qs.query).count(' JOIN '), 6) task_select_related = qs.get(pk=task.pk) with self.assertNumQueries(0): self.assertEqual(task_select_related.creator.staffuser.staff, task_get.creator.staffuser.staff) self.assertEqual(task_select_related.owner.staffuser.staff, task_get.owner.staffuser.staff) class Ticket21203Tests(TestCase): def test_ticket_21203(self): p = Ticket21203Parent.objects.create(parent_bool=True) c = Ticket21203Child.objects.create(parent=p) qs = Ticket21203Child.objects.select_related('parent').defer('parent__created') self.assertSequenceEqual(qs, [c]) self.assertIs(qs[0].parent.parent_bool, True) class ValuesJoinPromotionTests(TestCase): def test_values_no_promotion_for_existing(self): qs = Node.objects.filter(parent__parent__isnull=False) self.assertIn(' INNER JOIN ', str(qs.query)) qs = qs.values('parent__parent__id') self.assertIn(' INNER JOIN ', str(qs.query)) # Make sure there is a left outer join without the filter. qs = Node.objects.values('parent__parent__id') self.assertIn(' LEFT OUTER JOIN ', str(qs.query)) def test_non_nullable_fk_not_promoted(self): qs = ObjectB.objects.values('objecta__name') self.assertIn(' INNER JOIN ', str(qs.query)) def test_ticket_21376(self): a = ObjectA.objects.create() ObjectC.objects.create(objecta=a) qs = ObjectC.objects.filter( Q(objecta=a) | Q(objectb__objecta=a), ) qs = qs.filter( Q(objectb=1) | Q(objecta=a), ) self.assertEqual(qs.count(), 1) tblname = connection.ops.quote_name(ObjectB._meta.db_table) self.assertIn(' LEFT OUTER JOIN %s' % tblname, str(qs.query)) class ForeignKeyToBaseExcludeTests(TestCase): def test_ticket_21787(self): sc1 = SpecialCategory.objects.create(special_name='sc1', name='sc1') sc2 = SpecialCategory.objects.create(special_name='sc2', name='sc2') sc3 = SpecialCategory.objects.create(special_name='sc3', name='sc3') c1 = CategoryItem.objects.create(category=sc1) CategoryItem.objects.create(category=sc2) self.assertSequenceEqual(SpecialCategory.objects.exclude(categoryitem__id=c1.pk).order_by('name'), [sc2, sc3]) self.assertSequenceEqual(SpecialCategory.objects.filter(categoryitem__id=c1.pk), [sc1]) class ReverseM2MCustomPkTests(TestCase): def test_ticket_21879(self): cpt1 = CustomPkTag.objects.create(id='cpt1', tag='cpt1') cp1 = CustomPk.objects.create(name='cp1', extra='extra') cp1.custompktag_set.add(cpt1) self.assertSequenceEqual(CustomPk.objects.filter(custompktag=cpt1), [cp1]) self.assertSequenceEqual(CustomPkTag.objects.filter(custom_pk=cp1), [cpt1]) class Ticket22429Tests(TestCase): def test_ticket_22429(self): sc1 = School.objects.create() st1 = Student.objects.create(school=sc1) sc2 = School.objects.create() st2 = Student.objects.create(school=sc2) cr = Classroom.objects.create(school=sc1) cr.students.add(st1) queryset = Student.objects.filter(~Q(classroom__school=F('school'))) self.assertSequenceEqual(queryset, [st2]) class Ticket23605Tests(TestCase): def test_ticket_23605(self): # Test filtering on a complicated q-object from ticket's report. # The query structure is such that we have multiple nested subqueries. # The original problem was that the inner queries weren't relabeled # correctly. # See also #24090. 
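# Data sketch for the complex filter below: a1 gets one matching
# Ticket23605B row (field_b0=10000.0, field_b1=True, linked to c1 with
# field_c0=10000.0), while a2 gets no Ticket23605B row at all.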
        a1 = Ticket23605A.objects.create()
        a2 = Ticket23605A.objects.create()
        c1 = Ticket23605C.objects.create(field_c0=10000.0)
        Ticket23605B.objects.create(
            field_b0=10000.0, field_b1=True, modelc_fk=c1, modela_fk=a1)
        complex_q = Q(pk__in=Ticket23605A.objects.filter(
            Q(
                # True for a1 as field_b0 = 10000, field_c0 = 10000.
                # False for a2 as it has no related ticket23605b rows.
                ticket23605b__field_b0__gte=1000000 / F("ticket23605b__modelc_fk__field_c0")
            ) &
            # True for a1 (field_b1=True)
            Q(ticket23605b__field_b1=True) &
            ~Q(ticket23605b__pk__in=Ticket23605B.objects.filter(
                ~(
                    # Same filters as the commented ones above, but
                    # double-negated (one negation from the ~Q() above, one
                    # from these parentheses). So, again, a1 matches and a2
                    # doesn't.
                    Q(field_b1=True) &
                    Q(field_b0__gte=1000000 / F("modelc_fk__field_c0"))
                )
            ))).filter(ticket23605b__field_b1=True))
        qs1 = Ticket23605A.objects.filter(complex_q)
        self.assertSequenceEqual(qs1, [a1])
        qs2 = Ticket23605A.objects.exclude(complex_q)
        self.assertSequenceEqual(qs2, [a2])


class TestTicket24279(TestCase):
    def test_ticket_24279(self):
        School.objects.create()
        qs = School.objects.filter(Q(pk__in=()) | Q())
        self.assertQuerysetEqual(qs, [])


class TestInvalidValuesRelation(SimpleTestCase):
    def test_invalid_values(self):
        msg = "invalid literal for int() with base 10: 'abc'"
        with self.assertRaisesMessage(ValueError, msg):
            Annotation.objects.filter(tag='abc')
        with self.assertRaisesMessage(ValueError, msg):
            Annotation.objects.filter(tag__in=[123, 'abc'])


class TestTicket24605(TestCase):
    def test_ticket_24605(self):
        """
        Subquery table names should be quoted.
        """
        i1 = Individual.objects.create(alive=True)
        RelatedIndividual.objects.create(related=i1)
        i2 = Individual.objects.create(alive=False)
        RelatedIndividual.objects.create(related=i2)
        i3 = Individual.objects.create(alive=True)
        i4 = Individual.objects.create(alive=False)

        self.assertSequenceEqual(Individual.objects.filter(Q(alive=False), Q(related_individual__isnull=True)), [i4])
        self.assertSequenceEqual(
            Individual.objects.exclude(Q(alive=False), Q(related_individual__isnull=True)).order_by('pk'),
            [i1, i2, i3]
        )


class Ticket23622Tests(TestCase):
    @skipUnlessDBFeature('can_distinct_on_fields')
    def test_ticket_23622(self):
        """
        Make sure __pk__in and __in work the same for related fields when
        using a distinct-on subquery.
""" a1 = Ticket23605A.objects.create() a2 = Ticket23605A.objects.create() c1 = Ticket23605C.objects.create(field_c0=0.0) Ticket23605B.objects.create( modela_fk=a1, field_b0=123, field_b1=True, modelc_fk=c1, ) Ticket23605B.objects.create( modela_fk=a1, field_b0=23, field_b1=True, modelc_fk=c1, ) Ticket23605B.objects.create( modela_fk=a1, field_b0=234, field_b1=True, modelc_fk=c1, ) Ticket23605B.objects.create( modela_fk=a1, field_b0=12, field_b1=True, modelc_fk=c1, ) Ticket23605B.objects.create( modela_fk=a2, field_b0=567, field_b1=True, modelc_fk=c1, ) Ticket23605B.objects.create( modela_fk=a2, field_b0=76, field_b1=True, modelc_fk=c1, ) Ticket23605B.objects.create( modela_fk=a2, field_b0=7, field_b1=True, modelc_fk=c1, ) Ticket23605B.objects.create( modela_fk=a2, field_b0=56, field_b1=True, modelc_fk=c1, ) qx = ( Q(ticket23605b__pk__in=Ticket23605B.objects.order_by('modela_fk', '-field_b1').distinct('modela_fk')) & Q(ticket23605b__field_b0__gte=300) ) qy = ( Q(ticket23605b__in=Ticket23605B.objects.order_by('modela_fk', '-field_b1').distinct('modela_fk')) & Q(ticket23605b__field_b0__gte=300) ) self.assertEqual( set(Ticket23605A.objects.filter(qx).values_list('pk', flat=True)), set(Ticket23605A.objects.filter(qy).values_list('pk', flat=True)) ) self.assertSequenceEqual(Ticket23605A.objects.filter(qx), [a2])
from django.db.models import Exists, F, IntegerField, OuterRef, Value from django.db.utils import DatabaseError, NotSupportedError from django.test import TestCase, skipIfDBFeature, skipUnlessDBFeature from .models import Number, ReservedName @skipUnlessDBFeature('supports_select_union') class QuerySetSetOperationTests(TestCase): @classmethod def setUpTestData(cls): Number.objects.bulk_create(Number(num=i, other_num=10 - i) for i in range(10)) def number_transform(self, value): return value.num def assertNumbersEqual(self, queryset, expected_numbers, ordered=True): self.assertQuerysetEqual(queryset, expected_numbers, self.number_transform, ordered) def test_simple_union(self): qs1 = Number.objects.filter(num__lte=1) qs2 = Number.objects.filter(num__gte=8) qs3 = Number.objects.filter(num=5) self.assertNumbersEqual(qs1.union(qs2, qs3), [0, 1, 5, 8, 9], ordered=False) @skipUnlessDBFeature('supports_select_intersection') def test_simple_intersection(self): qs1 = Number.objects.filter(num__lte=5) qs2 = Number.objects.filter(num__gte=5) qs3 = Number.objects.filter(num__gte=4, num__lte=6) self.assertNumbersEqual(qs1.intersection(qs2, qs3), [5], ordered=False) @skipUnlessDBFeature('supports_select_intersection') def test_intersection_with_values(self): ReservedName.objects.create(name='a', order=2) qs1 = ReservedName.objects.all() reserved_name = qs1.intersection(qs1).values('name', 'order', 'id').get() self.assertEqual(reserved_name['name'], 'a') self.assertEqual(reserved_name['order'], 2) reserved_name = qs1.intersection(qs1).values_list('name', 'order', 'id').get() self.assertEqual(reserved_name[:2], ('a', 2)) @skipUnlessDBFeature('supports_select_difference') def test_simple_difference(self): qs1 = Number.objects.filter(num__lte=5) qs2 = Number.objects.filter(num__lte=4) self.assertNumbersEqual(qs1.difference(qs2), [5], ordered=False) def test_union_distinct(self): qs1 = Number.objects.all() qs2 = Number.objects.all() self.assertEqual(len(list(qs1.union(qs2, all=True))), 20) self.assertEqual(len(list(qs1.union(qs2))), 10) @skipUnlessDBFeature('supports_select_intersection') def test_intersection_with_empty_qs(self): qs1 = Number.objects.all() qs2 = Number.objects.none() qs3 = Number.objects.filter(pk__in=[]) self.assertEqual(len(qs1.intersection(qs2)), 0) self.assertEqual(len(qs1.intersection(qs3)), 0) self.assertEqual(len(qs2.intersection(qs1)), 0) self.assertEqual(len(qs3.intersection(qs1)), 0) self.assertEqual(len(qs2.intersection(qs2)), 0) self.assertEqual(len(qs3.intersection(qs3)), 0) @skipUnlessDBFeature('supports_select_difference') def test_difference_with_empty_qs(self): qs1 = Number.objects.all() qs2 = Number.objects.none() qs3 = Number.objects.filter(pk__in=[]) self.assertEqual(len(qs1.difference(qs2)), 10) self.assertEqual(len(qs1.difference(qs3)), 10) self.assertEqual(len(qs2.difference(qs1)), 0) self.assertEqual(len(qs3.difference(qs1)), 0) self.assertEqual(len(qs2.difference(qs2)), 0) self.assertEqual(len(qs3.difference(qs3)), 0) @skipUnlessDBFeature('supports_select_difference') def test_difference_with_values(self): ReservedName.objects.create(name='a', order=2) qs1 = ReservedName.objects.all() qs2 = ReservedName.objects.none() reserved_name = qs1.difference(qs2).values('name', 'order', 'id').get() self.assertEqual(reserved_name['name'], 'a') self.assertEqual(reserved_name['order'], 2) reserved_name = qs1.difference(qs2).values_list('name', 'order', 'id').get() self.assertEqual(reserved_name[:2], ('a', 2)) def test_union_with_empty_qs(self): qs1 = Number.objects.all() qs2 = 
Number.objects.none() qs3 = Number.objects.filter(pk__in=[]) self.assertEqual(len(qs1.union(qs2)), 10) self.assertEqual(len(qs2.union(qs1)), 10) self.assertEqual(len(qs1.union(qs3)), 10) self.assertEqual(len(qs3.union(qs1)), 10) self.assertEqual(len(qs2.union(qs1, qs1, qs1)), 10) self.assertEqual(len(qs2.union(qs1, qs1, all=True)), 20) self.assertEqual(len(qs2.union(qs2)), 0) self.assertEqual(len(qs3.union(qs3)), 0) def test_limits(self): qs1 = Number.objects.all() qs2 = Number.objects.all() self.assertEqual(len(list(qs1.union(qs2)[:2])), 2) def test_ordering(self): qs1 = Number.objects.filter(num__lte=1) qs2 = Number.objects.filter(num__gte=2, num__lte=3) self.assertNumbersEqual(qs1.union(qs2).order_by('-num'), [3, 2, 1, 0]) def test_ordering_by_f_expression(self): qs1 = Number.objects.filter(num__lte=1) qs2 = Number.objects.filter(num__gte=2, num__lte=3) self.assertNumbersEqual(qs1.union(qs2).order_by(F('num').desc()), [3, 2, 1, 0]) def test_union_with_values(self): ReservedName.objects.create(name='a', order=2) qs1 = ReservedName.objects.all() reserved_name = qs1.union(qs1).values('name', 'order', 'id').get() self.assertEqual(reserved_name['name'], 'a') self.assertEqual(reserved_name['order'], 2) reserved_name = qs1.union(qs1).values_list('name', 'order', 'id').get() self.assertEqual(reserved_name[:2], ('a', 2)) # List of columns can be changed. reserved_name = qs1.union(qs1).values_list('order').get() self.assertEqual(reserved_name, (2,)) def test_union_with_two_annotated_values_list(self): qs1 = Number.objects.filter(num=1).annotate( count=Value(0, IntegerField()), ).values_list('num', 'count') qs2 = Number.objects.filter(num=2).values('pk').annotate( count=F('num'), ).annotate( num=Value(1, IntegerField()), ).values_list('num', 'count') self.assertCountEqual(qs1.union(qs2), [(1, 0), (2, 1)]) def test_union_with_extra_and_values_list(self): qs1 = Number.objects.filter(num=1).extra( select={'count': 0}, ).values_list('num', 'count') qs2 = Number.objects.filter(num=2).extra(select={'count': 1}) self.assertCountEqual(qs1.union(qs2), [(1, 0), (2, 1)]) def test_union_with_values_list_on_annotated_and_unannotated(self): ReservedName.objects.create(name='rn1', order=1) qs1 = Number.objects.annotate( has_reserved_name=Exists(ReservedName.objects.filter(order=OuterRef('num'))) ).filter(has_reserved_name=True) qs2 = Number.objects.filter(num=9) self.assertCountEqual(qs1.union(qs2).values_list('num', flat=True), [1, 9]) def test_union_with_values_list_and_order(self): ReservedName.objects.bulk_create([ ReservedName(name='rn1', order=7), ReservedName(name='rn2', order=5), ReservedName(name='rn0', order=6), ReservedName(name='rn9', order=-1), ]) qs1 = ReservedName.objects.filter(order__gte=6) qs2 = ReservedName.objects.filter(order__lte=5) union_qs = qs1.union(qs2) for qs, expected_result in ( # Order by a single column. (union_qs.order_by('-pk').values_list('order', flat=True), [-1, 6, 5, 7]), (union_qs.order_by('pk').values_list('order', flat=True), [7, 5, 6, -1]), (union_qs.values_list('order', flat=True).order_by('-pk'), [-1, 6, 5, 7]), (union_qs.values_list('order', flat=True).order_by('pk'), [7, 5, 6, -1]), # Order by multiple columns. 
            (union_qs.order_by('-name', 'pk').values_list('order', flat=True), [-1, 5, 7, 6]),
            (union_qs.values_list('order', flat=True).order_by('-name', 'pk'), [-1, 5, 7, 6]),
        ):
            with self.subTest(qs=qs):
                self.assertEqual(list(qs), expected_result)

    def test_count_union(self):
        qs1 = Number.objects.filter(num__lte=1).values('num')
        qs2 = Number.objects.filter(num__gte=2, num__lte=3).values('num')
        self.assertEqual(qs1.union(qs2).count(), 4)

    def test_count_union_empty_result(self):
        qs = Number.objects.filter(pk__in=[])
        self.assertEqual(qs.union(qs).count(), 0)

    @skipUnlessDBFeature('supports_select_difference')
    def test_count_difference(self):
        qs1 = Number.objects.filter(num__lt=10)
        qs2 = Number.objects.filter(num__lt=9)
        self.assertEqual(qs1.difference(qs2).count(), 1)

    @skipUnlessDBFeature('supports_select_intersection')
    def test_count_intersection(self):
        qs1 = Number.objects.filter(num__gte=5)
        qs2 = Number.objects.filter(num__lte=5)
        self.assertEqual(qs1.intersection(qs2).count(), 1)

    @skipUnlessDBFeature('supports_slicing_ordering_in_compound')
    def test_ordering_subqueries(self):
        qs1 = Number.objects.order_by('num')[:2]
        qs2 = Number.objects.order_by('-num')[:2]
        self.assertNumbersEqual(qs1.union(qs2).order_by('-num')[:4], [9, 8, 1, 0])

    @skipIfDBFeature('supports_slicing_ordering_in_compound')
    def test_unsupported_ordering_slicing_raises_db_error(self):
        qs1 = Number.objects.all()
        qs2 = Number.objects.all()
        msg = 'LIMIT/OFFSET not allowed in subqueries of compound statements'
        with self.assertRaisesMessage(DatabaseError, msg):
            list(qs1.union(qs2[:10]))
        msg = 'ORDER BY not allowed in subqueries of compound statements'
        with self.assertRaisesMessage(DatabaseError, msg):
            list(qs1.order_by('id').union(qs2))

    @skipIfDBFeature('supports_select_intersection')
    def test_unsupported_intersection_raises_db_error(self):
        qs1 = Number.objects.all()
        qs2 = Number.objects.all()
        msg = 'intersection is not supported on this database backend'
        with self.assertRaisesMessage(NotSupportedError, msg):
            list(qs1.intersection(qs2))

    def test_combining_multiple_models(self):
        ReservedName.objects.create(name='99 little bugs', order=99)
        qs1 = Number.objects.filter(num=1).values_list('num', flat=True)
        qs2 = ReservedName.objects.values_list('order')
        self.assertEqual(list(qs1.union(qs2).order_by('num')), [1, 99])

    def test_order_raises_on_non_selected_column(self):
        qs1 = Number.objects.filter().annotate(
            annotation=Value(1, IntegerField()),
        ).values('annotation', num2=F('num'))
        qs2 = Number.objects.filter().values('id', 'num')
        # Ordering by columns in the select list shouldn't raise.
        list(qs1.union(qs2).order_by('annotation'))
        list(qs1.union(qs2).order_by('num2'))
        msg = 'ORDER BY term does not match any column in the result set'
        # 'id' is not part of the select list.
        with self.assertRaisesMessage(DatabaseError, msg):
            list(qs1.union(qs2).order_by('id'))
        # 'num' was re-aliased to 'num2' in qs1's select list.
        with self.assertRaisesMessage(DatabaseError, msg):
            list(qs1.union(qs2).order_by('num'))
        # With the operands switched, qs2's 'num' is selected again, so
        # ordering by it works.
        list(qs2.union(qs1).order_by('num'))

    @skipUnlessDBFeature('supports_select_difference', 'supports_select_intersection')
    def test_qs_with_subcompound_qs(self):
        qs1 = Number.objects.all()
        qs2 = Number.objects.intersection(Number.objects.filter(num__gt=1))
        self.assertEqual(qs1.difference(qs2).count(), 2)

    def test_order_by_same_type(self):
        qs = Number.objects.all()
        union = qs.union(qs)
        numbers = list(range(10))
        self.assertNumbersEqual(union.order_by('num'), numbers)
        self.assertNumbersEqual(union.order_by('other_num'), reversed(numbers))
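# Editorial sketch, not part of the test suite: the ordering rule exercised by
# test_order_raises_on_non_selected_column above. After .union(), ORDER BY may
# only name columns that appear in the combined SELECT list, under the aliases
# they carry there; columns dropped or re-aliased by .values()/.values_list()
# can't be used. The helper name and its defaults are ours; Number is the
# model defined for these tests.
def boundary_numbers(low=1, high=8):
    qs1 = Number.objects.filter(num__lte=low).values_list('num', flat=True)
    qs2 = Number.objects.filter(num__gte=high).values_list('num', flat=True)
    # 'num' is selected by both operands, so it's a valid ORDER BY term.
    # Ordering the union by a non-selected column such as 'id' would raise
    # DatabaseError instead.
    return qs1.union(qs2).order_by('num')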
import datetime import itertools import unittest from copy import copy from unittest import mock from django.core.management.color import no_style from django.db import ( DatabaseError, IntegrityError, OperationalError, connection, ) from django.db.models import Index, Model, Q from django.db.models.constraints import CheckConstraint, UniqueConstraint from django.db.models.deletion import CASCADE, PROTECT from django.db.models.fields import ( AutoField, BigAutoField, BigIntegerField, BinaryField, BooleanField, CharField, DateField, DateTimeField, IntegerField, PositiveIntegerField, SlugField, TextField, TimeField, UUIDField, ) from django.db.models.fields.related import ( ForeignKey, ForeignObject, ManyToManyField, OneToOneField, ) from django.db.transaction import TransactionManagementError, atomic from django.db.utils import DataError from django.test import ( TransactionTestCase, skipIfDBFeature, skipUnlessDBFeature, ) from django.test.utils import CaptureQueriesContext, isolate_apps from django.utils import timezone from .fields import ( CustomManyToManyField, InheritedManyToManyField, MediumBlobField, ) from .models import ( Author, AuthorCharFieldWithIndex, AuthorTextFieldWithIndex, AuthorWithDefaultHeight, AuthorWithEvenLongerName, AuthorWithIndexedName, AuthorWithIndexedNameAndBirthday, AuthorWithUniqueName, AuthorWithUniqueNameAndBirthday, Book, BookForeignObj, BookWeak, BookWithLongName, BookWithO2O, BookWithoutAuthor, BookWithSlug, IntegerPK, Node, Note, NoteRename, Tag, TagIndexed, TagM2MTest, TagUniqueRename, Thing, UniqueTest, new_apps, ) class SchemaTests(TransactionTestCase): """ Tests for the schema-alteration code. Be aware that these tests are more liable than most to false results, as sometimes the code to check if a test has worked is almost as complex as the code it is testing. """ available_apps = [] models = [ Author, AuthorCharFieldWithIndex, AuthorTextFieldWithIndex, AuthorWithDefaultHeight, AuthorWithEvenLongerName, Book, BookWeak, BookWithLongName, BookWithO2O, BookWithSlug, IntegerPK, Node, Note, Tag, TagIndexed, TagM2MTest, TagUniqueRename, Thing, UniqueTest, ] # Utility functions def setUp(self): # local_models should contain test dependent model classes that will be # automatically removed from the app cache on test tear down. self.local_models = [] # isolated_local_models contains models that are in test methods # decorated with @isolate_apps. 
self.isolated_local_models = [] def tearDown(self): # Delete any tables made for our models self.delete_tables() new_apps.clear_cache() for model in new_apps.get_models(): model._meta._expire_cache() if 'schema' in new_apps.all_models: for model in self.local_models: for many_to_many in model._meta.many_to_many: through = many_to_many.remote_field.through if through and through._meta.auto_created: del new_apps.all_models['schema'][through._meta.model_name] del new_apps.all_models['schema'][model._meta.model_name] if self.isolated_local_models: with connection.schema_editor() as editor: for model in self.isolated_local_models: editor.delete_model(model) def delete_tables(self): "Deletes all model tables for our models for a clean test environment" converter = connection.introspection.identifier_converter with connection.schema_editor() as editor: connection.disable_constraint_checking() table_names = connection.introspection.table_names() for model in itertools.chain(SchemaTests.models, self.local_models): tbl = converter(model._meta.db_table) if tbl in table_names: editor.delete_model(model) table_names.remove(tbl) connection.enable_constraint_checking() def column_classes(self, model): with connection.cursor() as cursor: columns = { d[0]: (connection.introspection.get_field_type(d[1], d), d) for d in connection.introspection.get_table_description( cursor, model._meta.db_table, ) } # SQLite has a different format for field_type for name, (type, desc) in columns.items(): if isinstance(type, tuple): columns[name] = (type[0], desc) # SQLite also doesn't error properly if not columns: raise DatabaseError("Table does not exist (empty pragma)") return columns def get_primary_key(self, table): with connection.cursor() as cursor: return connection.introspection.get_primary_key_column(cursor, table) def get_indexes(self, table): """ Get the indexes on the table using a new cursor. """ with connection.cursor() as cursor: return [ c['columns'][0] for c in connection.introspection.get_constraints(cursor, table).values() if c['index'] and len(c['columns']) == 1 ] def get_uniques(self, table): with connection.cursor() as cursor: return [ c['columns'][0] for c in connection.introspection.get_constraints(cursor, table).values() if c['unique'] and len(c['columns']) == 1 ] def get_constraints(self, table): """ Get the constraints on a table using a new cursor. """ with connection.cursor() as cursor: return connection.introspection.get_constraints(cursor, table) def get_constraints_for_column(self, model, column_name): constraints = self.get_constraints(model._meta.db_table) constraints_for_column = [] for name, details in constraints.items(): if details['columns'] == [column_name]: constraints_for_column.append(name) return sorted(constraints_for_column) def check_added_field_default(self, schema_editor, model, field, field_name, expected_default, cast_function=None): with connection.cursor() as cursor: schema_editor.add_field(model, field) cursor.execute("SELECT {} FROM {};".format(field_name, model._meta.db_table)) database_default = cursor.fetchall()[0][0] if cast_function and not type(database_default) == type(expected_default): database_default = cast_function(database_default) self.assertEqual(database_default, expected_default) def get_constraints_count(self, table, column, fk_to): """ Return a dict with keys 'fks', 'uniques, and 'indexes' indicating the number of foreign keys, unique constraints, and indexes on `table`.`column`. 
The `fk_to` argument is a 2-tuple specifying the expected foreign key relationship's (table, column). """ with connection.cursor() as cursor: constraints = connection.introspection.get_constraints(cursor, table) counts = {'fks': 0, 'uniques': 0, 'indexes': 0} for c in constraints.values(): if c['columns'] == [column]: if c['foreign_key'] == fk_to: counts['fks'] += 1 if c['unique']: counts['uniques'] += 1 elif c['index']: counts['indexes'] += 1 return counts def assertIndexOrder(self, table, index, order): constraints = self.get_constraints(table) self.assertIn(index, constraints) index_orders = constraints[index]['orders'] self.assertTrue(all(val == expected for val, expected in zip(index_orders, order))) def assertForeignKeyExists(self, model, column, expected_fk_table, field='id'): """ Fail if the FK constraint on `model.Meta.db_table`.`column` to `expected_fk_table`.id doesn't exist. """ constraints = self.get_constraints(model._meta.db_table) constraint_fk = None for details in constraints.values(): if details['columns'] == [column] and details['foreign_key']: constraint_fk = details['foreign_key'] break self.assertEqual(constraint_fk, (expected_fk_table, field)) def assertForeignKeyNotExists(self, model, column, expected_fk_table): with self.assertRaises(AssertionError): self.assertForeignKeyExists(model, column, expected_fk_table) # Tests def test_creation_deletion(self): """ Tries creating a model's table, and then deleting it. """ with connection.schema_editor() as editor: # Create the table editor.create_model(Author) # The table is there list(Author.objects.all()) # Clean up that table editor.delete_model(Author) # No deferred SQL should be left over. self.assertEqual(editor.deferred_sql, []) # The table is gone with self.assertRaises(DatabaseError): list(Author.objects.all()) @skipUnlessDBFeature('supports_foreign_keys') def test_fk(self): "Creating tables out of FK order, then repointing, works" # Create the table with connection.schema_editor() as editor: editor.create_model(Book) editor.create_model(Author) editor.create_model(Tag) # Initial tables are there list(Author.objects.all()) list(Book.objects.all()) # Make sure the FK constraint is present with self.assertRaises(IntegrityError): Book.objects.create( author_id=1, title="Much Ado About Foreign Keys", pub_date=datetime.datetime.now(), ) # Repoint the FK constraint old_field = Book._meta.get_field("author") new_field = ForeignKey(Tag, CASCADE) new_field.set_attributes_from_name("author") with connection.schema_editor() as editor: editor.alter_field(Book, old_field, new_field, strict=True) self.assertForeignKeyExists(Book, 'author_id', 'schema_tag') @skipUnlessDBFeature('can_create_inline_fk') def test_inline_fk(self): # Create some tables. with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(Book) editor.create_model(Note) self.assertForeignKeyNotExists(Note, 'book_id', 'schema_book') # Add a foreign key from one to the other. with connection.schema_editor() as editor: new_field = ForeignKey(Book, CASCADE) new_field.set_attributes_from_name('book') editor.add_field(Note, new_field) self.assertForeignKeyExists(Note, 'book_id', 'schema_book') # Creating a FK field with a constraint uses a single statement without # a deferred ALTER TABLE. 
self.assertFalse([ sql for sql in (str(statement) for statement in editor.deferred_sql) if sql.startswith('ALTER TABLE') and 'ADD CONSTRAINT' in sql ]) @skipUnlessDBFeature('supports_foreign_keys') def test_char_field_with_db_index_to_fk(self): # Create the table with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(AuthorCharFieldWithIndex) # Change CharField to FK old_field = AuthorCharFieldWithIndex._meta.get_field('char_field') new_field = ForeignKey(Author, CASCADE, blank=True) new_field.set_attributes_from_name('char_field') with connection.schema_editor() as editor: editor.alter_field(AuthorCharFieldWithIndex, old_field, new_field, strict=True) self.assertForeignKeyExists(AuthorCharFieldWithIndex, 'char_field_id', 'schema_author') @skipUnlessDBFeature('supports_foreign_keys') @skipUnlessDBFeature('supports_index_on_text_field') def test_text_field_with_db_index_to_fk(self): # Create the table with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(AuthorTextFieldWithIndex) # Change TextField to FK old_field = AuthorTextFieldWithIndex._meta.get_field('text_field') new_field = ForeignKey(Author, CASCADE, blank=True) new_field.set_attributes_from_name('text_field') with connection.schema_editor() as editor: editor.alter_field(AuthorTextFieldWithIndex, old_field, new_field, strict=True) self.assertForeignKeyExists(AuthorTextFieldWithIndex, 'text_field_id', 'schema_author') @skipUnlessDBFeature('supports_foreign_keys') def test_fk_to_proxy(self): "Creating a FK to a proxy model creates database constraints." class AuthorProxy(Author): class Meta: app_label = 'schema' apps = new_apps proxy = True class AuthorRef(Model): author = ForeignKey(AuthorProxy, on_delete=CASCADE) class Meta: app_label = 'schema' apps = new_apps self.local_models = [AuthorProxy, AuthorRef] # Create the table with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(AuthorRef) self.assertForeignKeyExists(AuthorRef, 'author_id', 'schema_author') @skipUnlessDBFeature('supports_foreign_keys') def test_fk_db_constraint(self): "The db_constraint parameter is respected" # Create the table with connection.schema_editor() as editor: editor.create_model(Tag) editor.create_model(Author) editor.create_model(BookWeak) # Initial tables are there list(Author.objects.all()) list(Tag.objects.all()) list(BookWeak.objects.all()) self.assertForeignKeyNotExists(BookWeak, 'author_id', 'schema_author') # Make a db_constraint=False FK new_field = ForeignKey(Tag, CASCADE, db_constraint=False) new_field.set_attributes_from_name("tag") with connection.schema_editor() as editor: editor.add_field(Author, new_field) self.assertForeignKeyNotExists(Author, 'tag_id', 'schema_tag') # Alter to one with a constraint new_field2 = ForeignKey(Tag, CASCADE) new_field2.set_attributes_from_name("tag") with connection.schema_editor() as editor: editor.alter_field(Author, new_field, new_field2, strict=True) self.assertForeignKeyExists(Author, 'tag_id', 'schema_tag') # Alter to one without a constraint again new_field2 = ForeignKey(Tag, CASCADE) new_field2.set_attributes_from_name("tag") with connection.schema_editor() as editor: editor.alter_field(Author, new_field2, new_field, strict=True) self.assertForeignKeyNotExists(Author, 'tag_id', 'schema_tag') @isolate_apps('schema') def test_no_db_constraint_added_during_primary_key_change(self): """ When a primary key that's pointed to by a ForeignKey with db_constraint=False is altered, a foreign key 
constraint isn't added. """ class Author(Model): class Meta: app_label = 'schema' class BookWeak(Model): author = ForeignKey(Author, CASCADE, db_constraint=False) class Meta: app_label = 'schema' with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(BookWeak) self.assertForeignKeyNotExists(BookWeak, 'author_id', 'schema_author') old_field = Author._meta.get_field('id') new_field = BigAutoField(primary_key=True) new_field.model = Author new_field.set_attributes_from_name('id') # @isolate_apps() and inner models are needed to have the model # relations populated, otherwise this doesn't act as a regression test. self.assertEqual(len(new_field.model._meta.related_objects), 1) with connection.schema_editor() as editor: editor.alter_field(Author, old_field, new_field, strict=True) self.assertForeignKeyNotExists(BookWeak, 'author_id', 'schema_author') def _test_m2m_db_constraint(self, M2MFieldClass): class LocalAuthorWithM2M(Model): name = CharField(max_length=255) class Meta: app_label = 'schema' apps = new_apps self.local_models = [LocalAuthorWithM2M] # Create the table with connection.schema_editor() as editor: editor.create_model(Tag) editor.create_model(LocalAuthorWithM2M) # Initial tables are there list(LocalAuthorWithM2M.objects.all()) list(Tag.objects.all()) # Make a db_constraint=False FK new_field = M2MFieldClass(Tag, related_name="authors", db_constraint=False) new_field.contribute_to_class(LocalAuthorWithM2M, "tags") # Add the field with connection.schema_editor() as editor: editor.add_field(LocalAuthorWithM2M, new_field) self.assertForeignKeyNotExists(new_field.remote_field.through, 'tag_id', 'schema_tag') @skipUnlessDBFeature('supports_foreign_keys') def test_m2m_db_constraint(self): self._test_m2m_db_constraint(ManyToManyField) @skipUnlessDBFeature('supports_foreign_keys') def test_m2m_db_constraint_custom(self): self._test_m2m_db_constraint(CustomManyToManyField) @skipUnlessDBFeature('supports_foreign_keys') def test_m2m_db_constraint_inherited(self): self._test_m2m_db_constraint(InheritedManyToManyField) def test_add_field(self): """ Tests adding fields to models """ # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Ensure there's no age field columns = self.column_classes(Author) self.assertNotIn("age", columns) # Add the new field new_field = IntegerField(null=True) new_field.set_attributes_from_name("age") with CaptureQueriesContext(connection) as ctx, connection.schema_editor() as editor: editor.add_field(Author, new_field) drop_default_sql = editor.sql_alter_column_no_default % { 'column': editor.quote_name(new_field.name), } self.assertFalse(any(drop_default_sql in query['sql'] for query in ctx.captured_queries)) # Ensure the field is right afterwards columns = self.column_classes(Author) self.assertEqual(columns['age'][0], "IntegerField") self.assertEqual(columns['age'][1][6], True) def test_add_field_remove_field(self): """ Adding a field and removing it removes all deferred sql referring to it. """ with connection.schema_editor() as editor: # Create a table with a unique constraint on the slug field. editor.create_model(Tag) # Remove the slug column. 
editor.remove_field(Tag, Tag._meta.get_field('slug')) self.assertEqual(editor.deferred_sql, []) def test_add_field_temp_default(self): """ Tests adding fields to models with a temporary default """ # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Ensure there's no age field columns = self.column_classes(Author) self.assertNotIn("age", columns) # Add some rows of data Author.objects.create(name="Andrew", height=30) Author.objects.create(name="Andrea") # Add a not-null field new_field = CharField(max_length=30, default="Godwin") new_field.set_attributes_from_name("surname") with connection.schema_editor() as editor: editor.add_field(Author, new_field) # Ensure the field is right afterwards columns = self.column_classes(Author) self.assertEqual(columns['surname'][0], "CharField") self.assertEqual(columns['surname'][1][6], connection.features.interprets_empty_strings_as_nulls) def test_add_field_temp_default_boolean(self): """ Tests adding fields to models with a temporary default where the default is False. (#21783) """ # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Ensure there's no age field columns = self.column_classes(Author) self.assertNotIn("age", columns) # Add some rows of data Author.objects.create(name="Andrew", height=30) Author.objects.create(name="Andrea") # Add a not-null field new_field = BooleanField(default=False) new_field.set_attributes_from_name("awesome") with connection.schema_editor() as editor: editor.add_field(Author, new_field) # Ensure the field is right afterwards columns = self.column_classes(Author) # BooleanField are stored as TINYINT(1) on MySQL. field_type = columns['awesome'][0] self.assertEqual(field_type, connection.features.introspected_boolean_field_type) def test_add_field_default_transform(self): """ Tests adding fields to models with a default that is not directly valid in the database (#22581) """ class TestTransformField(IntegerField): # Weird field that saves the count of items in its value def get_default(self): return self.default def get_prep_value(self, value): if value is None: return 0 return len(value) # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Add some rows of data Author.objects.create(name="Andrew", height=30) Author.objects.create(name="Andrea") # Add the field with a default it needs to cast (to string in this case) new_field = TestTransformField(default={1: 2}) new_field.set_attributes_from_name("thing") with connection.schema_editor() as editor: editor.add_field(Author, new_field) # Ensure the field is there columns = self.column_classes(Author) field_type, field_info = columns['thing'] self.assertEqual(field_type, 'IntegerField') # Make sure the values were transformed correctly self.assertEqual(Author.objects.extra(where=["thing = 1"]).count(), 2) def test_add_field_binary(self): """ Tests binary fields get a sane default (#22851) """ # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Add the new field new_field = BinaryField(blank=True) new_field.set_attributes_from_name("bits") with connection.schema_editor() as editor: editor.add_field(Author, new_field) # Ensure the field is right afterwards columns = self.column_classes(Author) # MySQL annoyingly uses the same backend, so it'll come back as one of # these two types. 
self.assertIn(columns['bits'][0], ("BinaryField", "TextField")) @unittest.skipUnless(connection.vendor == 'mysql', "MySQL specific") def test_add_binaryfield_mediumblob(self): """ Test adding a custom-sized binary field on MySQL (#24846). """ # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Add the new field with default new_field = MediumBlobField(blank=True, default=b'123') new_field.set_attributes_from_name('bits') with connection.schema_editor() as editor: editor.add_field(Author, new_field) columns = self.column_classes(Author) # Introspection treats BLOBs as TextFields self.assertEqual(columns['bits'][0], "TextField") def test_alter(self): """ Tests simple altering of fields """ # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Ensure the field is right to begin with columns = self.column_classes(Author) self.assertEqual(columns['name'][0], "CharField") self.assertEqual(bool(columns['name'][1][6]), bool(connection.features.interprets_empty_strings_as_nulls)) # Alter the name field to a TextField old_field = Author._meta.get_field("name") new_field = TextField(null=True) new_field.set_attributes_from_name("name") with connection.schema_editor() as editor: editor.alter_field(Author, old_field, new_field, strict=True) # Ensure the field is right afterwards columns = self.column_classes(Author) self.assertEqual(columns['name'][0], "TextField") self.assertEqual(columns['name'][1][6], True) # Change nullability again new_field2 = TextField(null=False) new_field2.set_attributes_from_name("name") with connection.schema_editor() as editor: editor.alter_field(Author, new_field, new_field2, strict=True) # Ensure the field is right afterwards columns = self.column_classes(Author) self.assertEqual(columns['name'][0], "TextField") self.assertEqual(bool(columns['name'][1][6]), bool(connection.features.interprets_empty_strings_as_nulls)) def test_alter_auto_field_to_integer_field(self): # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Change AutoField to IntegerField old_field = Author._meta.get_field('id') new_field = IntegerField(primary_key=True) new_field.set_attributes_from_name('id') new_field.model = Author with connection.schema_editor() as editor: editor.alter_field(Author, old_field, new_field, strict=True) def test_alter_auto_field_to_char_field(self): # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Change AutoField to CharField old_field = Author._meta.get_field('id') new_field = CharField(primary_key=True, max_length=50) new_field.set_attributes_from_name('id') new_field.model = Author with connection.schema_editor() as editor: editor.alter_field(Author, old_field, new_field, strict=True) def test_alter_not_unique_field_to_primary_key(self): # Create the table. with connection.schema_editor() as editor: editor.create_model(Author) # Change UUIDField to primary key. old_field = Author._meta.get_field('uuid') new_field = UUIDField(primary_key=True) new_field.set_attributes_from_name('uuid') new_field.model = Author with connection.schema_editor() as editor: editor.remove_field(Author, Author._meta.get_field('id')) editor.alter_field(Author, old_field, new_field, strict=True) def test_alter_text_field(self): # Regression for "BLOB/TEXT column 'info' can't have a default value") # on MySQL. 
# Create the table with connection.schema_editor() as editor: editor.create_model(Note) old_field = Note._meta.get_field("info") new_field = TextField(blank=True) new_field.set_attributes_from_name("info") with connection.schema_editor() as editor: editor.alter_field(Note, old_field, new_field, strict=True) @skipUnlessDBFeature('can_defer_constraint_checks', 'can_rollback_ddl') def test_alter_fk_checks_deferred_constraints(self): """ #25492 - Altering a foreign key's structure and data in the same transaction. """ with connection.schema_editor() as editor: editor.create_model(Node) old_field = Node._meta.get_field('parent') new_field = ForeignKey(Node, CASCADE) new_field.set_attributes_from_name('parent') parent = Node.objects.create() with connection.schema_editor() as editor: # Update the parent FK to create a deferred constraint check. Node.objects.update(parent=parent) editor.alter_field(Node, old_field, new_field, strict=True) def test_alter_text_field_to_date_field(self): """ #25002 - Test conversion of text field to date field. """ with connection.schema_editor() as editor: editor.create_model(Note) Note.objects.create(info='1988-05-05') old_field = Note._meta.get_field('info') new_field = DateField(blank=True) new_field.set_attributes_from_name('info') with connection.schema_editor() as editor: editor.alter_field(Note, old_field, new_field, strict=True) # Make sure the field isn't nullable columns = self.column_classes(Note) self.assertFalse(columns['info'][1][6]) def test_alter_text_field_to_datetime_field(self): """ #25002 - Test conversion of text field to datetime field. """ with connection.schema_editor() as editor: editor.create_model(Note) Note.objects.create(info='1988-05-05 3:16:17.4567') old_field = Note._meta.get_field('info') new_field = DateTimeField(blank=True) new_field.set_attributes_from_name('info') with connection.schema_editor() as editor: editor.alter_field(Note, old_field, new_field, strict=True) # Make sure the field isn't nullable columns = self.column_classes(Note) self.assertFalse(columns['info'][1][6]) def test_alter_text_field_to_time_field(self): """ #25002 - Test conversion of text field to time field. """ with connection.schema_editor() as editor: editor.create_model(Note) Note.objects.create(info='3:16:17.4567') old_field = Note._meta.get_field('info') new_field = TimeField(blank=True) new_field.set_attributes_from_name('info') with connection.schema_editor() as editor: editor.alter_field(Note, old_field, new_field, strict=True) # Make sure the field isn't nullable columns = self.column_classes(Note) self.assertFalse(columns['info'][1][6]) @skipIfDBFeature('interprets_empty_strings_as_nulls') def test_alter_textual_field_keep_null_status(self): """ Changing a field type shouldn't affect the not null status. """ with connection.schema_editor() as editor: editor.create_model(Note) with self.assertRaises(IntegrityError): Note.objects.create(info=None) old_field = Note._meta.get_field("info") new_field = CharField(max_length=50) new_field.set_attributes_from_name("info") with connection.schema_editor() as editor: editor.alter_field(Note, old_field, new_field, strict=True) with self.assertRaises(IntegrityError): Note.objects.create(info=None) def test_alter_numeric_field_keep_null_status(self): """ Changing a field type shouldn't affect the not null status. 
""" with connection.schema_editor() as editor: editor.create_model(UniqueTest) with self.assertRaises(IntegrityError): UniqueTest.objects.create(year=None, slug='aaa') old_field = UniqueTest._meta.get_field("year") new_field = BigIntegerField() new_field.set_attributes_from_name("year") with connection.schema_editor() as editor: editor.alter_field(UniqueTest, old_field, new_field, strict=True) with self.assertRaises(IntegrityError): UniqueTest.objects.create(year=None, slug='bbb') def test_alter_null_to_not_null(self): """ #23609 - Tests handling of default values when altering from NULL to NOT NULL. """ # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Ensure the field is right to begin with columns = self.column_classes(Author) self.assertTrue(columns['height'][1][6]) # Create some test data Author.objects.create(name='Not null author', height=12) Author.objects.create(name='Null author') # Verify null value self.assertEqual(Author.objects.get(name='Not null author').height, 12) self.assertIsNone(Author.objects.get(name='Null author').height) # Alter the height field to NOT NULL with default old_field = Author._meta.get_field("height") new_field = PositiveIntegerField(default=42) new_field.set_attributes_from_name("height") with connection.schema_editor() as editor: editor.alter_field(Author, old_field, new_field, strict=True) # Ensure the field is right afterwards columns = self.column_classes(Author) self.assertFalse(columns['height'][1][6]) # Verify default value self.assertEqual(Author.objects.get(name='Not null author').height, 12) self.assertEqual(Author.objects.get(name='Null author').height, 42) def test_alter_charfield_to_null(self): """ #24307 - Should skip an alter statement on databases with interprets_empty_strings_as_null when changing a CharField to null. """ # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Change the CharField to null old_field = Author._meta.get_field('name') new_field = copy(old_field) new_field.null = True with connection.schema_editor() as editor: editor.alter_field(Author, old_field, new_field, strict=True) @unittest.skipUnless(connection.vendor == 'postgresql', 'PostgreSQL specific') def test_alter_char_field_decrease_length(self): # Create the table. with connection.schema_editor() as editor: editor.create_model(Author) Author.objects.create(name='x' * 255) # Change max_length of CharField. old_field = Author._meta.get_field('name') new_field = CharField(max_length=254) new_field.set_attributes_from_name('name') with connection.schema_editor() as editor: msg = 'value too long for type character varying(254)' with self.assertRaisesMessage(DataError, msg): editor.alter_field(Author, old_field, new_field, strict=True) def test_alter_textfield_to_null(self): """ #24307 - Should skip an alter statement on databases with interprets_empty_strings_as_null when changing a TextField to null. """ # Create the table with connection.schema_editor() as editor: editor.create_model(Note) # Change the TextField to null old_field = Note._meta.get_field('info') new_field = copy(old_field) new_field.null = True with connection.schema_editor() as editor: editor.alter_field(Note, old_field, new_field, strict=True) @skipUnlessDBFeature('supports_combined_alters') def test_alter_null_to_not_null_keeping_default(self): """ #23738 - Can change a nullable field with default to non-nullable with the same default. 
""" # Create the table with connection.schema_editor() as editor: editor.create_model(AuthorWithDefaultHeight) # Ensure the field is right to begin with columns = self.column_classes(AuthorWithDefaultHeight) self.assertTrue(columns['height'][1][6]) # Alter the height field to NOT NULL keeping the previous default old_field = AuthorWithDefaultHeight._meta.get_field("height") new_field = PositiveIntegerField(default=42) new_field.set_attributes_from_name("height") with connection.schema_editor() as editor: editor.alter_field(AuthorWithDefaultHeight, old_field, new_field, strict=True) # Ensure the field is right afterwards columns = self.column_classes(AuthorWithDefaultHeight) self.assertFalse(columns['height'][1][6]) @skipUnlessDBFeature('supports_foreign_keys') def test_alter_fk(self): """ Tests altering of FKs """ # Create the table with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(Book) # Ensure the field is right to begin with columns = self.column_classes(Book) self.assertEqual(columns['author_id'][0], "IntegerField") self.assertForeignKeyExists(Book, 'author_id', 'schema_author') # Alter the FK old_field = Book._meta.get_field("author") new_field = ForeignKey(Author, CASCADE, editable=False) new_field.set_attributes_from_name("author") with connection.schema_editor() as editor: editor.alter_field(Book, old_field, new_field, strict=True) # Ensure the field is right afterwards columns = self.column_classes(Book) self.assertEqual(columns['author_id'][0], "IntegerField") self.assertForeignKeyExists(Book, 'author_id', 'schema_author') @skipUnlessDBFeature('supports_foreign_keys') def test_alter_to_fk(self): """ #24447 - Tests adding a FK constraint for an existing column """ class LocalBook(Model): author = IntegerField() title = CharField(max_length=100, db_index=True) pub_date = DateTimeField() class Meta: app_label = 'schema' apps = new_apps self.local_models = [LocalBook] # Create the tables with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(LocalBook) # Ensure no FK constraint exists constraints = self.get_constraints(LocalBook._meta.db_table) for details in constraints.values(): if details['foreign_key']: self.fail('Found an unexpected FK constraint to %s' % details['columns']) old_field = LocalBook._meta.get_field("author") new_field = ForeignKey(Author, CASCADE) new_field.set_attributes_from_name("author") with connection.schema_editor() as editor: editor.alter_field(LocalBook, old_field, new_field, strict=True) self.assertForeignKeyExists(LocalBook, 'author_id', 'schema_author') @skipUnlessDBFeature('supports_foreign_keys') def test_alter_o2o_to_fk(self): """ #24163 - Tests altering of OneToOneField to ForeignKey """ # Create the table with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(BookWithO2O) # Ensure the field is right to begin with columns = self.column_classes(BookWithO2O) self.assertEqual(columns['author_id'][0], "IntegerField") # Ensure the field is unique author = Author.objects.create(name="Joe") BookWithO2O.objects.create(author=author, title="Django 1", pub_date=datetime.datetime.now()) with self.assertRaises(IntegrityError): BookWithO2O.objects.create(author=author, title="Django 2", pub_date=datetime.datetime.now()) BookWithO2O.objects.all().delete() self.assertForeignKeyExists(BookWithO2O, 'author_id', 'schema_author') # Alter the OneToOneField to ForeignKey old_field = BookWithO2O._meta.get_field("author") new_field = ForeignKey(Author, 
CASCADE) new_field.set_attributes_from_name("author") with connection.schema_editor() as editor: editor.alter_field(BookWithO2O, old_field, new_field, strict=True) # Ensure the field is right afterwards columns = self.column_classes(Book) self.assertEqual(columns['author_id'][0], "IntegerField") # Ensure the field is not unique anymore Book.objects.create(author=author, title="Django 1", pub_date=datetime.datetime.now()) Book.objects.create(author=author, title="Django 2", pub_date=datetime.datetime.now()) self.assertForeignKeyExists(Book, 'author_id', 'schema_author') @skipUnlessDBFeature('supports_foreign_keys') def test_alter_fk_to_o2o(self): """ #24163 - Tests altering of ForeignKey to OneToOneField """ # Create the table with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(Book) # Ensure the field is right to begin with columns = self.column_classes(Book) self.assertEqual(columns['author_id'][0], "IntegerField") # Ensure the field is not unique author = Author.objects.create(name="Joe") Book.objects.create(author=author, title="Django 1", pub_date=datetime.datetime.now()) Book.objects.create(author=author, title="Django 2", pub_date=datetime.datetime.now()) Book.objects.all().delete() self.assertForeignKeyExists(Book, 'author_id', 'schema_author') # Alter the ForeignKey to OneToOneField old_field = Book._meta.get_field("author") new_field = OneToOneField(Author, CASCADE) new_field.set_attributes_from_name("author") with connection.schema_editor() as editor: editor.alter_field(Book, old_field, new_field, strict=True) # Ensure the field is right afterwards columns = self.column_classes(BookWithO2O) self.assertEqual(columns['author_id'][0], "IntegerField") # Ensure the field is unique now BookWithO2O.objects.create(author=author, title="Django 1", pub_date=datetime.datetime.now()) with self.assertRaises(IntegrityError): BookWithO2O.objects.create(author=author, title="Django 2", pub_date=datetime.datetime.now()) self.assertForeignKeyExists(BookWithO2O, 'author_id', 'schema_author') def test_alter_field_fk_to_o2o(self): with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(Book) expected_fks = 1 if connection.features.supports_foreign_keys else 0 # Check the index is right to begin with. counts = self.get_constraints_count( Book._meta.db_table, Book._meta.get_field('author').column, (Author._meta.db_table, Author._meta.pk.column), ) self.assertEqual(counts, {'fks': expected_fks, 'uniques': 0, 'indexes': 1}) old_field = Book._meta.get_field('author') new_field = OneToOneField(Author, CASCADE) new_field.set_attributes_from_name('author') with connection.schema_editor() as editor: editor.alter_field(Book, old_field, new_field, strict=True) counts = self.get_constraints_count( Book._meta.db_table, Book._meta.get_field('author').column, (Author._meta.db_table, Author._meta.pk.column), ) # The index on ForeignKey is replaced with a unique constraint for OneToOneField. self.assertEqual(counts, {'fks': expected_fks, 'uniques': 1, 'indexes': 0}) def test_alter_field_fk_keeps_index(self): with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(Book) expected_fks = 1 if connection.features.supports_foreign_keys else 0 # Check the index is right to begin with. 
counts = self.get_constraints_count( Book._meta.db_table, Book._meta.get_field('author').column, (Author._meta.db_table, Author._meta.pk.column), ) self.assertEqual(counts, {'fks': expected_fks, 'uniques': 0, 'indexes': 1}) old_field = Book._meta.get_field('author') # on_delete changed from CASCADE. new_field = ForeignKey(Author, PROTECT) new_field.set_attributes_from_name('author') with connection.schema_editor() as editor: editor.alter_field(Book, old_field, new_field, strict=True) counts = self.get_constraints_count( Book._meta.db_table, Book._meta.get_field('author').column, (Author._meta.db_table, Author._meta.pk.column), ) # The index remains. self.assertEqual(counts, {'fks': expected_fks, 'uniques': 0, 'indexes': 1}) def test_alter_field_o2o_to_fk(self): with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(BookWithO2O) expected_fks = 1 if connection.features.supports_foreign_keys else 0 # Check the unique constraint is right to begin with. counts = self.get_constraints_count( BookWithO2O._meta.db_table, BookWithO2O._meta.get_field('author').column, (Author._meta.db_table, Author._meta.pk.column), ) self.assertEqual(counts, {'fks': expected_fks, 'uniques': 1, 'indexes': 0}) old_field = BookWithO2O._meta.get_field('author') new_field = ForeignKey(Author, CASCADE) new_field.set_attributes_from_name('author') with connection.schema_editor() as editor: editor.alter_field(BookWithO2O, old_field, new_field, strict=True) counts = self.get_constraints_count( BookWithO2O._meta.db_table, BookWithO2O._meta.get_field('author').column, (Author._meta.db_table, Author._meta.pk.column), ) # The unique constraint on OneToOneField is replaced with an index for ForeignKey. self.assertEqual(counts, {'fks': expected_fks, 'uniques': 0, 'indexes': 1}) def test_alter_field_o2o_keeps_unique(self): with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(BookWithO2O) expected_fks = 1 if connection.features.supports_foreign_keys else 0 # Check the unique constraint is right to begin with. counts = self.get_constraints_count( BookWithO2O._meta.db_table, BookWithO2O._meta.get_field('author').column, (Author._meta.db_table, Author._meta.pk.column), ) self.assertEqual(counts, {'fks': expected_fks, 'uniques': 1, 'indexes': 0}) old_field = BookWithO2O._meta.get_field('author') # on_delete changed from CASCADE. new_field = OneToOneField(Author, PROTECT) new_field.set_attributes_from_name('author') with connection.schema_editor() as editor: editor.alter_field(BookWithO2O, old_field, new_field, strict=True) counts = self.get_constraints_count( BookWithO2O._meta.db_table, BookWithO2O._meta.get_field('author').column, (Author._meta.db_table, Author._meta.pk.column), ) # The unique constraint remains. self.assertEqual(counts, {'fks': expected_fks, 'uniques': 1, 'indexes': 0}) def test_alter_db_table_case(self): # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Alter the case of the table old_table_name = Author._meta.db_table with connection.schema_editor() as editor: editor.alter_db_table(Author, old_table_name, old_table_name.upper()) def test_alter_implicit_id_to_explicit(self): """ Should be able to convert an implicit "id" field to an explicit "id" primary key field. 
""" with connection.schema_editor() as editor: editor.create_model(Author) old_field = Author._meta.get_field("id") new_field = AutoField(primary_key=True) new_field.set_attributes_from_name("id") new_field.model = Author with connection.schema_editor() as editor: editor.alter_field(Author, old_field, new_field, strict=True) # This will fail if DROP DEFAULT is inadvertently executed on this # field which drops the id sequence, at least on PostgreSQL. Author.objects.create(name='Foo') Author.objects.create(name='Bar') def test_alter_autofield_pk_to_bigautofield_pk_sequence_owner(self): """ Converting an implicit PK to BigAutoField(primary_key=True) should keep a sequence owner on PostgreSQL. """ with connection.schema_editor() as editor: editor.create_model(Author) old_field = Author._meta.get_field('id') new_field = BigAutoField(primary_key=True) new_field.set_attributes_from_name('id') new_field.model = Author with connection.schema_editor() as editor: editor.alter_field(Author, old_field, new_field, strict=True) Author.objects.create(name='Foo', pk=1) with connection.cursor() as cursor: sequence_reset_sqls = connection.ops.sequence_reset_sql(no_style(), [Author]) if sequence_reset_sqls: cursor.execute(sequence_reset_sqls[0]) # Fail on PostgreSQL if sequence is missing an owner. self.assertIsNotNone(Author.objects.create(name='Bar')) def test_alter_int_pk_to_autofield_pk(self): """ Should be able to rename an IntegerField(primary_key=True) to AutoField(primary_key=True). """ with connection.schema_editor() as editor: editor.create_model(IntegerPK) old_field = IntegerPK._meta.get_field('i') new_field = AutoField(primary_key=True) new_field.model = IntegerPK new_field.set_attributes_from_name('i') with connection.schema_editor() as editor: editor.alter_field(IntegerPK, old_field, new_field, strict=True) def test_alter_int_pk_to_bigautofield_pk(self): """ Should be able to rename an IntegerField(primary_key=True) to BigAutoField(primary_key=True). """ with connection.schema_editor() as editor: editor.create_model(IntegerPK) old_field = IntegerPK._meta.get_field('i') new_field = BigAutoField(primary_key=True) new_field.model = IntegerPK new_field.set_attributes_from_name('i') with connection.schema_editor() as editor: editor.alter_field(IntegerPK, old_field, new_field, strict=True) def test_alter_int_pk_to_int_unique(self): """ Should be able to rename an IntegerField(primary_key=True) to IntegerField(unique=True). """ with connection.schema_editor() as editor: editor.create_model(IntegerPK) # Delete the old PK old_field = IntegerPK._meta.get_field('i') new_field = IntegerField(unique=True) new_field.model = IntegerPK new_field.set_attributes_from_name('i') with connection.schema_editor() as editor: editor.alter_field(IntegerPK, old_field, new_field, strict=True) # The primary key constraint is gone. Result depends on database: # 'id' for SQLite, None for others (must not be 'i'). self.assertIn(self.get_primary_key(IntegerPK._meta.db_table), ('id', None)) # Set up a model class as it currently stands. The original IntegerPK # class is now out of date and some backends make use of the whole # model class when modifying a field (such as sqlite3 when remaking a # table) so an outdated model class leads to incorrect results. 
        class Transitional(Model):
            i = IntegerField(unique=True)
            j = IntegerField(unique=True)

            class Meta:
                app_label = 'schema'
                apps = new_apps
                db_table = 'INTEGERPK'

        # model requires a new PK
        old_field = Transitional._meta.get_field('j')
        new_field = IntegerField(primary_key=True)
        new_field.model = Transitional
        new_field.set_attributes_from_name('j')
        with connection.schema_editor() as editor:
            editor.alter_field(Transitional, old_field, new_field, strict=True)
        # Create a model class representing the updated model.
        class IntegerUnique(Model):
            i = IntegerField(unique=True)
            j = IntegerField(primary_key=True)

            class Meta:
                app_label = 'schema'
                apps = new_apps
                db_table = 'INTEGERPK'

        # Ensure unique constraint works.
        IntegerUnique.objects.create(i=1, j=1)
        with self.assertRaises(IntegrityError):
            IntegerUnique.objects.create(i=1, j=2)

    def test_rename(self):
        """
        Tests simple renaming of fields.
        """
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        # Ensure the field is right to begin with
        columns = self.column_classes(Author)
        self.assertEqual(columns['name'][0], "CharField")
        self.assertNotIn("display_name", columns)
        # Alter the name field's name
        old_field = Author._meta.get_field("name")
        new_field = CharField(max_length=254)
        new_field.set_attributes_from_name("display_name")
        with connection.schema_editor() as editor:
            editor.alter_field(Author, old_field, new_field, strict=True)
        # Ensure the field is right afterwards
        columns = self.column_classes(Author)
        self.assertEqual(columns['display_name'][0], "CharField")
        self.assertNotIn("name", columns)

    @isolate_apps('schema')
    def test_rename_referenced_field(self):
        class Author(Model):
            name = CharField(max_length=255, unique=True)

            class Meta:
                app_label = 'schema'

        class Book(Model):
            author = ForeignKey(Author, CASCADE, to_field='name')

            class Meta:
                app_label = 'schema'

        with connection.schema_editor() as editor:
            editor.create_model(Author)
            editor.create_model(Book)
        new_field = CharField(max_length=255, unique=True)
        new_field.set_attributes_from_name('renamed')
        with connection.schema_editor(atomic=connection.features.supports_atomic_references_rename) as editor:
            editor.alter_field(Author, Author._meta.get_field('name'), new_field)
        # Ensure the foreign key reference was updated.
        self.assertForeignKeyExists(Book, 'author_id', 'schema_author', 'renamed')
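    # supports_atomic_references_rename is False on backends that can't
    # rename a column other tables reference while inside a transaction;
    # passing atomic=False there makes the editor run that DDL outside one,
    # which is what lets the to_field rename above propagate to the FK.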
""" with connection.schema_editor() as editor: editor.create_model(Note) with self.assertRaises(IntegrityError): Note.objects.create(info=None) old_field = Note._meta.get_field("info") new_field = TextField() new_field.set_attributes_from_name("detail_info") with connection.schema_editor() as editor: editor.alter_field(Note, old_field, new_field, strict=True) columns = self.column_classes(Note) self.assertEqual(columns['detail_info'][0], "TextField") self.assertNotIn("info", columns) with self.assertRaises(IntegrityError): NoteRename.objects.create(detail_info=None) def _test_m2m_create(self, M2MFieldClass): """ Tests M2M fields on models during creation """ class LocalBookWithM2M(Model): author = ForeignKey(Author, CASCADE) title = CharField(max_length=100, db_index=True) pub_date = DateTimeField() tags = M2MFieldClass("TagM2MTest", related_name="books") class Meta: app_label = 'schema' apps = new_apps self.local_models = [LocalBookWithM2M] # Create the tables with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(TagM2MTest) editor.create_model(LocalBookWithM2M) # Ensure there is now an m2m table there columns = self.column_classes(LocalBookWithM2M._meta.get_field("tags").remote_field.through) self.assertEqual(columns['tagm2mtest_id'][0], "IntegerField") def test_m2m_create(self): self._test_m2m_create(ManyToManyField) def test_m2m_create_custom(self): self._test_m2m_create(CustomManyToManyField) def test_m2m_create_inherited(self): self._test_m2m_create(InheritedManyToManyField) def _test_m2m_create_through(self, M2MFieldClass): """ Tests M2M fields on models during creation with through models """ class LocalTagThrough(Model): book = ForeignKey("schema.LocalBookWithM2MThrough", CASCADE) tag = ForeignKey("schema.TagM2MTest", CASCADE) class Meta: app_label = 'schema' apps = new_apps class LocalBookWithM2MThrough(Model): tags = M2MFieldClass("TagM2MTest", related_name="books", through=LocalTagThrough) class Meta: app_label = 'schema' apps = new_apps self.local_models = [LocalTagThrough, LocalBookWithM2MThrough] # Create the tables with connection.schema_editor() as editor: editor.create_model(LocalTagThrough) editor.create_model(TagM2MTest) editor.create_model(LocalBookWithM2MThrough) # Ensure there is now an m2m table there columns = self.column_classes(LocalTagThrough) self.assertEqual(columns['book_id'][0], "IntegerField") self.assertEqual(columns['tag_id'][0], "IntegerField") def test_m2m_create_through(self): self._test_m2m_create_through(ManyToManyField) def test_m2m_create_through_custom(self): self._test_m2m_create_through(CustomManyToManyField) def test_m2m_create_through_inherited(self): self._test_m2m_create_through(InheritedManyToManyField) def _test_m2m(self, M2MFieldClass): """ Tests adding/removing M2M fields on models """ class LocalAuthorWithM2M(Model): name = CharField(max_length=255) class Meta: app_label = 'schema' apps = new_apps self.local_models = [LocalAuthorWithM2M] # Create the tables with connection.schema_editor() as editor: editor.create_model(LocalAuthorWithM2M) editor.create_model(TagM2MTest) # Create an M2M field new_field = M2MFieldClass("schema.TagM2MTest", related_name="authors") new_field.contribute_to_class(LocalAuthorWithM2M, "tags") # Ensure there's no m2m table there with self.assertRaises(DatabaseError): self.column_classes(new_field.remote_field.through) # Add the field with connection.schema_editor() as editor: editor.add_field(LocalAuthorWithM2M, new_field) # Ensure there is now an m2m table there columns = 
        self.assertEqual(columns['tagm2mtest_id'][0], "IntegerField")
        # "Alter" the field. This should not rename the DB table to itself.
        with connection.schema_editor() as editor:
            editor.alter_field(LocalAuthorWithM2M, new_field, new_field, strict=True)
        # Remove the M2M table again
        with connection.schema_editor() as editor:
            editor.remove_field(LocalAuthorWithM2M, new_field)
        # Ensure there's no m2m table there
        with self.assertRaises(DatabaseError):
            self.column_classes(new_field.remote_field.through)
        # Make sure the model state is coherent with the table one now that
        # we've removed the tags field.
        opts = LocalAuthorWithM2M._meta
        opts.local_many_to_many.remove(new_field)
        del new_apps.all_models['schema'][new_field.remote_field.through._meta.model_name]
        opts._expire_cache()

    def test_m2m(self):
        self._test_m2m(ManyToManyField)

    def test_m2m_custom(self):
        self._test_m2m(CustomManyToManyField)

    def test_m2m_inherited(self):
        self._test_m2m(InheritedManyToManyField)

    def _test_m2m_through_alter(self, M2MFieldClass):
        """
        Tests altering M2Ms with explicit through models (should no-op)
        """
        class LocalAuthorTag(Model):
            author = ForeignKey("schema.LocalAuthorWithM2MThrough", CASCADE)
            tag = ForeignKey("schema.TagM2MTest", CASCADE)

            class Meta:
                app_label = 'schema'
                apps = new_apps

        class LocalAuthorWithM2MThrough(Model):
            name = CharField(max_length=255)
            tags = M2MFieldClass("schema.TagM2MTest", related_name="authors", through=LocalAuthorTag)

            class Meta:
                app_label = 'schema'
                apps = new_apps

        self.local_models = [LocalAuthorTag, LocalAuthorWithM2MThrough]
        # Create the tables
        with connection.schema_editor() as editor:
            editor.create_model(LocalAuthorTag)
            editor.create_model(LocalAuthorWithM2MThrough)
            editor.create_model(TagM2MTest)
        # Ensure the m2m table is there
        self.assertEqual(len(self.column_classes(LocalAuthorTag)), 3)
        # "Alter" the field's blankness. This should not actually do anything.
        old_field = LocalAuthorWithM2MThrough._meta.get_field("tags")
        new_field = M2MFieldClass("schema.TagM2MTest", related_name="authors", through=LocalAuthorTag)
        new_field.contribute_to_class(LocalAuthorWithM2MThrough, "tags")
        with connection.schema_editor() as editor:
            editor.alter_field(LocalAuthorWithM2MThrough, old_field, new_field, strict=True)
        # Ensure the m2m table is still there
        self.assertEqual(len(self.column_classes(LocalAuthorTag)), 3)

    def test_m2m_through_alter(self):
        self._test_m2m_through_alter(ManyToManyField)

    def test_m2m_through_alter_custom(self):
        self._test_m2m_through_alter(CustomManyToManyField)

    def test_m2m_through_alter_inherited(self):
        self._test_m2m_through_alter(InheritedManyToManyField)

    def _test_m2m_repoint(self, M2MFieldClass):
        """
        Tests repointing M2M fields
        """
        class LocalBookWithM2M(Model):
            author = ForeignKey(Author, CASCADE)
            title = CharField(max_length=100, db_index=True)
            pub_date = DateTimeField()
            tags = M2MFieldClass("TagM2MTest", related_name="books")

            class Meta:
                app_label = 'schema'
                apps = new_apps

        self.local_models = [LocalBookWithM2M]
        # Create the tables
        with connection.schema_editor() as editor:
            editor.create_model(Author)
            editor.create_model(LocalBookWithM2M)
            editor.create_model(TagM2MTest)
            editor.create_model(UniqueTest)
        # Ensure the M2M exists and points to TagM2MTest
        if connection.features.supports_foreign_keys:
            self.assertForeignKeyExists(
                LocalBookWithM2M._meta.get_field("tags").remote_field.through,
                'tagm2mtest_id',
                'schema_tagm2mtest',
            )
        # Repoint the M2M
        old_field = LocalBookWithM2M._meta.get_field("tags")
        new_field = M2MFieldClass(UniqueTest)
        new_field.contribute_to_class(LocalBookWithM2M, "uniques")
        with connection.schema_editor() as editor:
            editor.alter_field(LocalBookWithM2M, old_field, new_field, strict=True)
        # Ensure old M2M is gone
        with self.assertRaises(DatabaseError):
            self.column_classes(LocalBookWithM2M._meta.get_field("tags").remote_field.through)
        # This model looks like the new model and is used for teardown.
        opts = LocalBookWithM2M._meta
        opts.local_many_to_many.remove(old_field)
        # Ensure the new M2M exists and points to UniqueTest
        if connection.features.supports_foreign_keys:
            self.assertForeignKeyExists(new_field.remote_field.through, 'uniquetest_id', 'schema_uniquetest')

    def test_m2m_repoint(self):
        self._test_m2m_repoint(ManyToManyField)

    def test_m2m_repoint_custom(self):
        self._test_m2m_repoint(CustomManyToManyField)

    def test_m2m_repoint_inherited(self):
        self._test_m2m_repoint(InheritedManyToManyField)

    @isolate_apps('schema')
    def test_m2m_rename_field_in_target_model(self):
        class LocalTagM2MTest(Model):
            title = CharField(max_length=255)

            class Meta:
                app_label = 'schema'

        class LocalM2M(Model):
            tags = ManyToManyField(LocalTagM2MTest)

            class Meta:
                app_label = 'schema'

        # Create the tables.
        with connection.schema_editor() as editor:
            editor.create_model(LocalM2M)
            editor.create_model(LocalTagM2MTest)
        self.isolated_local_models = [LocalM2M, LocalTagM2MTest]
        # Ensure the m2m table is there.
        self.assertEqual(len(self.column_classes(LocalM2M)), 1)
        # Alter a field in LocalTagM2MTest.
        old_field = LocalTagM2MTest._meta.get_field('title')
        new_field = CharField(max_length=254)
        new_field.contribute_to_class(LocalTagM2MTest, 'title1')
        # @isolate_apps() and inner models are needed to have the model
        # relations populated, otherwise this doesn't act as a regression test.
        self.assertEqual(len(new_field.model._meta.related_objects), 1)
        with connection.schema_editor() as editor:
            editor.alter_field(LocalTagM2MTest, old_field, new_field, strict=True)
        # Ensure the m2m table is still there.
        self.assertEqual(len(self.column_classes(LocalM2M)), 1)

    @skipUnlessDBFeature('supports_column_check_constraints')
    def test_check_constraints(self):
        """
        Tests creating/deleting CHECK constraints
        """
        # Create the tables
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        # Ensure the constraint exists
        constraints = self.get_constraints(Author._meta.db_table)
        if not any(details['columns'] == ['height'] and details['check'] for details in constraints.values()):
            self.fail("No check constraint for height found")
        # Alter the column to remove it
        old_field = Author._meta.get_field("height")
        new_field = IntegerField(null=True, blank=True)
        new_field.set_attributes_from_name("height")
        with connection.schema_editor() as editor:
            editor.alter_field(Author, old_field, new_field, strict=True)
        constraints = self.get_constraints(Author._meta.db_table)
        for details in constraints.values():
            if details['columns'] == ["height"] and details['check']:
                self.fail("Check constraint for height found")
        # Alter the column to re-add it
        new_field2 = Author._meta.get_field("height")
        with connection.schema_editor() as editor:
            editor.alter_field(Author, new_field, new_field2, strict=True)
        constraints = self.get_constraints(Author._meta.db_table)
        if not any(details['columns'] == ['height'] and details['check'] for details in constraints.values()):
            self.fail("No check constraint for height found")

    @skipUnlessDBFeature('supports_column_check_constraints')
    def test_remove_field_check_does_not_remove_meta_constraints(self):
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        # Add the custom check constraint
        constraint = CheckConstraint(check=Q(height__gte=0), name='author_height_gte_0_check')
        custom_constraint_name = constraint.name
        Author._meta.constraints = [constraint]
        with connection.schema_editor() as editor:
            editor.add_constraint(Author, constraint)
        # Ensure the constraints exist
        constraints = self.get_constraints(Author._meta.db_table)
        self.assertIn(custom_constraint_name, constraints)
        other_constraints = [
            name for name, details in constraints.items()
            if details['columns'] == ['height'] and details['check'] and name != custom_constraint_name
        ]
        self.assertEqual(len(other_constraints), 1)
        # Alter the column to remove field check
        old_field = Author._meta.get_field('height')
        new_field = IntegerField(null=True, blank=True)
        new_field.set_attributes_from_name('height')
        with connection.schema_editor() as editor:
            editor.alter_field(Author, old_field, new_field, strict=True)
        constraints = self.get_constraints(Author._meta.db_table)
        self.assertIn(custom_constraint_name, constraints)
        other_constraints = [
            name for name, details in constraints.items()
            if details['columns'] == ['height'] and details['check'] and name != custom_constraint_name
        ]
        self.assertEqual(len(other_constraints), 0)
        # Alter the column to re-add field check
        new_field2 = Author._meta.get_field('height')
        with connection.schema_editor() as editor:
            editor.alter_field(Author, new_field, new_field2, strict=True)
        constraints = self.get_constraints(Author._meta.db_table)
        self.assertIn(custom_constraint_name, constraints)
        other_constraints = [
            name for name, details in constraints.items()
            if details['columns'] == ['height'] and details['check'] and name != custom_constraint_name
        ]
        self.assertEqual(len(other_constraints), 1)
        # Drop the check constraint
        with connection.schema_editor() as editor:
            Author._meta.constraints = []
            editor.remove_constraint(Author, constraint)

    def test_unique(self):
        """
        Tests removing and adding unique constraints to a single column.
        """
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Tag)
        # Ensure the field is unique to begin with
        Tag.objects.create(title="foo", slug="foo")
        with self.assertRaises(IntegrityError):
            Tag.objects.create(title="bar", slug="foo")
        Tag.objects.all().delete()
        # Alter the slug field to be non-unique
        old_field = Tag._meta.get_field("slug")
        new_field = SlugField(unique=False)
        new_field.set_attributes_from_name("slug")
        with connection.schema_editor() as editor:
            editor.alter_field(Tag, old_field, new_field, strict=True)
        # Ensure the field is no longer unique
        Tag.objects.create(title="foo", slug="foo")
        Tag.objects.create(title="bar", slug="foo")
        Tag.objects.all().delete()
        # Alter the slug field to be unique
        new_field2 = SlugField(unique=True)
        new_field2.set_attributes_from_name("slug")
        with connection.schema_editor() as editor:
            editor.alter_field(Tag, new_field, new_field2, strict=True)
        # Ensure the field is unique again
        Tag.objects.create(title="foo", slug="foo")
        with self.assertRaises(IntegrityError):
            Tag.objects.create(title="bar", slug="foo")
        Tag.objects.all().delete()
        # Rename the field
        new_field3 = SlugField(unique=True)
        new_field3.set_attributes_from_name("slug2")
        with connection.schema_editor() as editor:
            editor.alter_field(Tag, new_field2, new_field3, strict=True)
        # Ensure the field is still unique
        TagUniqueRename.objects.create(title="foo", slug2="foo")
        with self.assertRaises(IntegrityError):
            TagUniqueRename.objects.create(title="bar", slug2="foo")
        Tag.objects.all().delete()

    def test_unique_name_quoting(self):
        old_table_name = TagUniqueRename._meta.db_table
        try:
            with connection.schema_editor() as editor:
                editor.create_model(TagUniqueRename)
                editor.alter_db_table(TagUniqueRename, old_table_name, 'unique-table')
                TagUniqueRename._meta.db_table = 'unique-table'
                # This fails if the unique index name isn't quoted.
                editor.alter_unique_together(TagUniqueRename, [], (('title', 'slug2'),))
        finally:
            TagUniqueRename._meta.db_table = old_table_name

    @isolate_apps('schema')
    @unittest.skipIf(connection.vendor == 'sqlite', 'SQLite naively remakes the table on field alteration.')
    @skipUnlessDBFeature('supports_foreign_keys')
    def test_unique_no_unnecessary_fk_drops(self):
        """
        If AlterField isn't selective about dropping foreign key constraints
        when modifying a field with a unique constraint, the AlterField
        incorrectly drops and recreates the Book.author foreign key even
        though it doesn't restrict the field being changed (#29193).
        """
        class Author(Model):
            name = CharField(max_length=254, unique=True)

            class Meta:
                app_label = 'schema'

        class Book(Model):
            author = ForeignKey(Author, CASCADE)

            class Meta:
                app_label = 'schema'

        with connection.schema_editor() as editor:
            editor.create_model(Author)
            editor.create_model(Book)
        new_field = CharField(max_length=255, unique=True)
        new_field.model = Author
        new_field.set_attributes_from_name('name')
        with self.assertLogs('django.db.backends.schema', 'DEBUG') as cm:
            with connection.schema_editor() as editor:
                editor.alter_field(Author, Author._meta.get_field('name'), new_field)
        # One SQL statement is executed to alter the field.
        self.assertEqual(len(cm.records), 1)

    @isolate_apps('schema')
    @unittest.skipIf(connection.vendor == 'sqlite', 'SQLite remakes the table on field alteration.')
    def test_unique_and_reverse_m2m(self):
        """
        AlterField can modify a unique field when there's a reverse M2M
        relation on the model.
        """
        class Tag(Model):
            title = CharField(max_length=255)
            slug = SlugField(unique=True)

            class Meta:
                app_label = 'schema'

        class Book(Model):
            tags = ManyToManyField(Tag, related_name='books')

            class Meta:
                app_label = 'schema'

        self.isolated_local_models = [Book._meta.get_field('tags').remote_field.through]
        with connection.schema_editor() as editor:
            editor.create_model(Tag)
            editor.create_model(Book)
        new_field = SlugField(max_length=75, unique=True)
        new_field.model = Tag
        new_field.set_attributes_from_name('slug')
        with self.assertLogs('django.db.backends.schema', 'DEBUG') as cm:
            with connection.schema_editor() as editor:
                editor.alter_field(Tag, Tag._meta.get_field('slug'), new_field)
        # One SQL statement is executed to alter the field.
        self.assertEqual(len(cm.records), 1)
        # Ensure that the field is still unique.
        Tag.objects.create(title='foo', slug='foo')
        with self.assertRaises(IntegrityError):
            Tag.objects.create(title='bar', slug='foo')

    @skipUnlessDBFeature('allows_multiple_constraints_on_same_fields')
    def test_remove_field_unique_does_not_remove_meta_constraints(self):
        with connection.schema_editor() as editor:
            editor.create_model(AuthorWithUniqueName)
        # Add the custom unique constraint
        constraint = UniqueConstraint(fields=['name'], name='author_name_uniq')
        custom_constraint_name = constraint.name
        AuthorWithUniqueName._meta.constraints = [constraint]
        with connection.schema_editor() as editor:
            editor.add_constraint(AuthorWithUniqueName, constraint)
        # Ensure the constraints exist
        constraints = self.get_constraints(AuthorWithUniqueName._meta.db_table)
        self.assertIn(custom_constraint_name, constraints)
        other_constraints = [
            name for name, details in constraints.items()
            if details['columns'] == ['name'] and details['unique'] and name != custom_constraint_name
        ]
        self.assertEqual(len(other_constraints), 1)
        # Alter the column to remove field uniqueness
        old_field = AuthorWithUniqueName._meta.get_field('name')
        new_field = CharField(max_length=255)
        new_field.set_attributes_from_name('name')
        with connection.schema_editor() as editor:
            editor.alter_field(AuthorWithUniqueName, old_field, new_field, strict=True)
        constraints = self.get_constraints(AuthorWithUniqueName._meta.db_table)
        self.assertIn(custom_constraint_name, constraints)
        other_constraints = [
            name for name, details in constraints.items()
            if details['columns'] == ['name'] and details['unique'] and name != custom_constraint_name
        ]
        self.assertEqual(len(other_constraints), 0)
        # Alter the column to re-add field uniqueness
        new_field2 = AuthorWithUniqueName._meta.get_field('name')
        with connection.schema_editor() as editor:
            editor.alter_field(AuthorWithUniqueName, new_field, new_field2, strict=True)
        constraints = self.get_constraints(AuthorWithUniqueName._meta.db_table)
        self.assertIn(custom_constraint_name, constraints)
        other_constraints = [
            name for name, details in constraints.items()
            if details['columns'] == ['name'] and details['unique'] and name != custom_constraint_name
        ]
        self.assertEqual(len(other_constraints), 1)
        # Drop the unique constraint
        with connection.schema_editor() as editor:
            AuthorWithUniqueName._meta.constraints = []
            editor.remove_constraint(AuthorWithUniqueName, constraint)
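    # The editor tells field-level uniqueness apart from Meta.constraints by
    # constraint name. A hedged sketch of the kind of lookup the assertions
    # above build on (get_constraints() is the real introspection method;
    # the filtering is illustrative):
    #
    #   with connection.cursor() as cursor:
    #       constraints = connection.introspection.get_constraints(
    #           cursor, AuthorWithUniqueName._meta.db_table)
    #   unique_names = [n for n, d in constraints.items() if d['unique']]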
    def test_unique_together(self):
        """
        Tests removing and adding unique_together constraints on a model.
        """
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(UniqueTest)
        # Ensure the fields are unique to begin with
        UniqueTest.objects.create(year=2012, slug="foo")
        UniqueTest.objects.create(year=2011, slug="foo")
        UniqueTest.objects.create(year=2011, slug="bar")
        with self.assertRaises(IntegrityError):
            UniqueTest.objects.create(year=2012, slug="foo")
        UniqueTest.objects.all().delete()
        # Alter the model to its non-unique-together companion
        with connection.schema_editor() as editor:
            editor.alter_unique_together(UniqueTest, UniqueTest._meta.unique_together, [])
        # Ensure the fields are no longer unique
        UniqueTest.objects.create(year=2012, slug="foo")
        UniqueTest.objects.create(year=2012, slug="foo")
        UniqueTest.objects.all().delete()
        # Alter it back
        with connection.schema_editor() as editor:
            editor.alter_unique_together(UniqueTest, [], UniqueTest._meta.unique_together)
        # Ensure the fields are unique again
        UniqueTest.objects.create(year=2012, slug="foo")
        with self.assertRaises(IntegrityError):
            UniqueTest.objects.create(year=2012, slug="foo")
        UniqueTest.objects.all().delete()

    def test_unique_together_with_fk(self):
        """
        Tests removing and adding unique_together constraints that include
        a foreign key.
        """
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Author)
            editor.create_model(Book)
        # Ensure the fields aren't unique together to begin with
        self.assertEqual(Book._meta.unique_together, ())
        # Add the unique_together constraint
        with connection.schema_editor() as editor:
            editor.alter_unique_together(Book, [], [['author', 'title']])
        # Alter it back
        with connection.schema_editor() as editor:
            editor.alter_unique_together(Book, [['author', 'title']], [])
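    # alter_unique_together() diffs the old and new option lists and only
    # creates/drops the constraints that actually changed; field names such
    # as 'author' are resolved to their columns ('author_id' for the FK), so
    # the resulting constraint covers (author_id, title).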
""" # Create the tables with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(BookWithoutAuthor) new_field = ForeignKey(Author, CASCADE) new_field.set_attributes_from_name('author') editor.add_field(BookWithoutAuthor, new_field) # Ensure the fields aren't unique to begin with self.assertEqual(Book._meta.unique_together, ()) # Add the unique_together constraint with connection.schema_editor() as editor: editor.alter_unique_together(Book, [], [['author', 'title']]) # Alter it back with connection.schema_editor() as editor: editor.alter_unique_together(Book, [['author', 'title']], []) @skipUnlessDBFeature('allows_multiple_constraints_on_same_fields') def test_remove_unique_together_does_not_remove_meta_constraints(self): with connection.schema_editor() as editor: editor.create_model(AuthorWithUniqueNameAndBirthday) # Add the custom unique constraint constraint = UniqueConstraint(fields=['name', 'birthday'], name='author_name_birthday_uniq') custom_constraint_name = constraint.name AuthorWithUniqueNameAndBirthday._meta.constraints = [constraint] with connection.schema_editor() as editor: editor.add_constraint(AuthorWithUniqueNameAndBirthday, constraint) # Ensure the constraints exist constraints = self.get_constraints(AuthorWithUniqueNameAndBirthday._meta.db_table) self.assertIn(custom_constraint_name, constraints) other_constraints = [ name for name, details in constraints.items() if details['columns'] == ['name', 'birthday'] and details['unique'] and name != custom_constraint_name ] self.assertEqual(len(other_constraints), 1) # Remove unique together unique_together = AuthorWithUniqueNameAndBirthday._meta.unique_together with connection.schema_editor() as editor: editor.alter_unique_together(AuthorWithUniqueNameAndBirthday, unique_together, []) constraints = self.get_constraints(AuthorWithUniqueNameAndBirthday._meta.db_table) self.assertIn(custom_constraint_name, constraints) other_constraints = [ name for name, details in constraints.items() if details['columns'] == ['name', 'birthday'] and details['unique'] and name != custom_constraint_name ] self.assertEqual(len(other_constraints), 0) # Re-add unique together with connection.schema_editor() as editor: editor.alter_unique_together(AuthorWithUniqueNameAndBirthday, [], unique_together) constraints = self.get_constraints(AuthorWithUniqueNameAndBirthday._meta.db_table) self.assertIn(custom_constraint_name, constraints) other_constraints = [ name for name, details in constraints.items() if details['columns'] == ['name', 'birthday'] and details['unique'] and name != custom_constraint_name ] self.assertEqual(len(other_constraints), 1) # Drop the unique constraint with connection.schema_editor() as editor: AuthorWithUniqueNameAndBirthday._meta.constraints = [] editor.remove_constraint(AuthorWithUniqueNameAndBirthday, constraint) def test_index_together(self): """ Tests removing and adding index_together constraints on a model. 
""" # Create the table with connection.schema_editor() as editor: editor.create_model(Tag) # Ensure there's no index on the year/slug columns first self.assertEqual( False, any( c["index"] for c in self.get_constraints("schema_tag").values() if c['columns'] == ["slug", "title"] ), ) # Alter the model to add an index with connection.schema_editor() as editor: editor.alter_index_together(Tag, [], [("slug", "title")]) # Ensure there is now an index self.assertEqual( True, any( c["index"] for c in self.get_constraints("schema_tag").values() if c['columns'] == ["slug", "title"] ), ) # Alter it back new_field2 = SlugField(unique=True) new_field2.set_attributes_from_name("slug") with connection.schema_editor() as editor: editor.alter_index_together(Tag, [("slug", "title")], []) # Ensure there's no index self.assertEqual( False, any( c["index"] for c in self.get_constraints("schema_tag").values() if c['columns'] == ["slug", "title"] ), ) def test_index_together_with_fk(self): """ Tests removing and adding index_together constraints that include a foreign key. """ # Create the table with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(Book) # Ensure the fields are unique to begin with self.assertEqual(Book._meta.index_together, ()) # Add the unique_together constraint with connection.schema_editor() as editor: editor.alter_index_together(Book, [], [['author', 'title']]) # Alter it back with connection.schema_editor() as editor: editor.alter_index_together(Book, [['author', 'title']], []) def test_create_index_together(self): """ Tests creating models with index_together already defined """ # Create the table with connection.schema_editor() as editor: editor.create_model(TagIndexed) # Ensure there is an index self.assertEqual( True, any( c["index"] for c in self.get_constraints("schema_tagindexed").values() if c['columns'] == ["slug", "title"] ), ) @skipUnlessDBFeature('allows_multiple_constraints_on_same_fields') def test_remove_index_together_does_not_remove_meta_indexes(self): with connection.schema_editor() as editor: editor.create_model(AuthorWithIndexedNameAndBirthday) # Add the custom index index = Index(fields=['name', 'birthday'], name='author_name_birthday_idx') custom_index_name = index.name AuthorWithIndexedNameAndBirthday._meta.indexes = [index] with connection.schema_editor() as editor: editor.add_index(AuthorWithIndexedNameAndBirthday, index) # Ensure the indexes exist constraints = self.get_constraints(AuthorWithIndexedNameAndBirthday._meta.db_table) self.assertIn(custom_index_name, constraints) other_constraints = [ name for name, details in constraints.items() if details['columns'] == ['name', 'birthday'] and details['index'] and name != custom_index_name ] self.assertEqual(len(other_constraints), 1) # Remove index together index_together = AuthorWithIndexedNameAndBirthday._meta.index_together with connection.schema_editor() as editor: editor.alter_index_together(AuthorWithIndexedNameAndBirthday, index_together, []) constraints = self.get_constraints(AuthorWithIndexedNameAndBirthday._meta.db_table) self.assertIn(custom_index_name, constraints) other_constraints = [ name for name, details in constraints.items() if details['columns'] == ['name', 'birthday'] and details['index'] and name != custom_index_name ] self.assertEqual(len(other_constraints), 0) # Re-add index together with connection.schema_editor() as editor: editor.alter_index_together(AuthorWithIndexedNameAndBirthday, [], index_together) constraints = 
        constraints = self.get_constraints(AuthorWithIndexedNameAndBirthday._meta.db_table)
        self.assertIn(custom_index_name, constraints)
        other_constraints = [
            name for name, details in constraints.items()
            if details['columns'] == ['name', 'birthday'] and details['index'] and name != custom_index_name
        ]
        self.assertEqual(len(other_constraints), 1)
        # Drop the index
        with connection.schema_editor() as editor:
            AuthorWithIndexedNameAndBirthday._meta.indexes = []
            editor.remove_index(AuthorWithIndexedNameAndBirthday, index)

    @isolate_apps('schema')
    def test_db_table(self):
        """
        Tests renaming of the table
        """
        class Author(Model):
            name = CharField(max_length=255)

            class Meta:
                app_label = 'schema'

        class Book(Model):
            author = ForeignKey(Author, CASCADE)

            class Meta:
                app_label = 'schema'

        # Create the table and one referring it.
        with connection.schema_editor() as editor:
            editor.create_model(Author)
            editor.create_model(Book)
        # Ensure the table is there to begin with
        columns = self.column_classes(Author)
        self.assertEqual(columns['name'][0], "CharField")
        # Alter the table
        with connection.schema_editor(atomic=connection.features.supports_atomic_references_rename) as editor:
            editor.alter_db_table(Author, "schema_author", "schema_otherauthor")
        # Ensure the table is there afterwards
        Author._meta.db_table = "schema_otherauthor"
        columns = self.column_classes(Author)
        self.assertEqual(columns['name'][0], "CharField")
        # Ensure the foreign key reference was updated
        self.assertForeignKeyExists(Book, "author_id", "schema_otherauthor")
        # Alter the table again
        with connection.schema_editor(atomic=connection.features.supports_atomic_references_rename) as editor:
            editor.alter_db_table(Author, "schema_otherauthor", "schema_author")
        # Ensure the table is still there
        Author._meta.db_table = "schema_author"
        columns = self.column_classes(Author)
        self.assertEqual(columns['name'][0], "CharField")

    def test_add_remove_index(self):
        """
        Tests index addition and removal
        """
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        # Ensure the table is there and has no index
        self.assertNotIn('title', self.get_indexes(Author._meta.db_table))
        # Add the index
        index = Index(fields=['name'], name='author_title_idx')
        with connection.schema_editor() as editor:
            editor.add_index(Author, index)
        self.assertIn('name', self.get_indexes(Author._meta.db_table))
        # Drop the index
        with connection.schema_editor() as editor:
            editor.remove_index(Author, index)
        self.assertNotIn('name', self.get_indexes(Author._meta.db_table))
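    # remove_index() drops by the index's declared name, so the Index passed
    # in must carry the same name used at creation time, e.g. (mirroring the
    # test above; the SQL is illustrative):
    #
    #   index = Index(fields=['name'], name='author_title_idx')
    #   editor.add_index(Author, index)     # CREATE INDEX "author_title_idx" ...
    #   editor.remove_index(Author, index)  # DROP INDEX "author_title_idx"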
""" with connection.schema_editor() as editor: editor.create_model(AuthorWithIndexedName) # Ensure the table has its index self.assertIn('name', self.get_indexes(AuthorWithIndexedName._meta.db_table)) # Add the custom index index = Index(fields=['-name'], name='author_name_idx') author_index_name = index.name with connection.schema_editor() as editor: db_index_name = editor._create_index_name( table_name=AuthorWithIndexedName._meta.db_table, column_names=('name',), ) try: AuthorWithIndexedName._meta.indexes = [index] with connection.schema_editor() as editor: editor.add_index(AuthorWithIndexedName, index) old_constraints = self.get_constraints(AuthorWithIndexedName._meta.db_table) self.assertIn(author_index_name, old_constraints) self.assertIn(db_index_name, old_constraints) # Change name field to db_index=False old_field = AuthorWithIndexedName._meta.get_field('name') new_field = CharField(max_length=255) new_field.set_attributes_from_name('name') with connection.schema_editor() as editor: editor.alter_field(AuthorWithIndexedName, old_field, new_field, strict=True) new_constraints = self.get_constraints(AuthorWithIndexedName._meta.db_table) self.assertNotIn(db_index_name, new_constraints) # The index from Meta.indexes is still in the database. self.assertIn(author_index_name, new_constraints) # Drop the index with connection.schema_editor() as editor: editor.remove_index(AuthorWithIndexedName, index) finally: AuthorWithIndexedName._meta.indexes = [] def test_order_index(self): """ Indexes defined with ordering (ASC/DESC) defined on column """ with connection.schema_editor() as editor: editor.create_model(Author) # The table doesn't have an index self.assertNotIn('title', self.get_indexes(Author._meta.db_table)) index_name = 'author_name_idx' # Add the index index = Index(fields=['name', '-weight'], name=index_name) with connection.schema_editor() as editor: editor.add_index(Author, index) if connection.features.supports_index_column_ordering: self.assertIndexOrder(Author._meta.db_table, index_name, ['ASC', 'DESC']) # Drop the index with connection.schema_editor() as editor: editor.remove_index(Author, index) def test_indexes(self): """ Tests creation/altering of indexes """ # Create the table with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(Book) # Ensure the table is there and has the right index self.assertIn( "title", self.get_indexes(Book._meta.db_table), ) # Alter to remove the index old_field = Book._meta.get_field("title") new_field = CharField(max_length=100, db_index=False) new_field.set_attributes_from_name("title") with connection.schema_editor() as editor: editor.alter_field(Book, old_field, new_field, strict=True) # Ensure the table is there and has no index self.assertNotIn( "title", self.get_indexes(Book._meta.db_table), ) # Alter to re-add the index new_field2 = Book._meta.get_field("title") with connection.schema_editor() as editor: editor.alter_field(Book, new_field, new_field2, strict=True) # Ensure the table is there and has the index again self.assertIn( "title", self.get_indexes(Book._meta.db_table), ) # Add a unique column, verify that creates an implicit index new_field3 = BookWithSlug._meta.get_field("slug") with connection.schema_editor() as editor: editor.add_field(Book, new_field3) self.assertIn( "slug", self.get_uniques(Book._meta.db_table), ) # Remove the unique, check the index goes with it new_field4 = CharField(max_length=20, unique=False) new_field4.set_attributes_from_name("slug") with connection.schema_editor() 
        with connection.schema_editor() as editor:
            editor.alter_field(BookWithSlug, new_field3, new_field4, strict=True)
        self.assertNotIn(
            "slug",
            self.get_uniques(Book._meta.db_table),
        )

    def test_text_field_with_db_index(self):
        with connection.schema_editor() as editor:
            editor.create_model(AuthorTextFieldWithIndex)
        # The text_field index is present if the database supports it.
        assertion = self.assertIn if connection.features.supports_index_on_text_field else self.assertNotIn
        assertion('text_field', self.get_indexes(AuthorTextFieldWithIndex._meta.db_table))

    def test_primary_key(self):
        """
        Tests altering of the primary key
        """
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Tag)
        # Ensure the table is there and has the right PK
        self.assertEqual(self.get_primary_key(Tag._meta.db_table), 'id')
        # Alter to change the PK
        id_field = Tag._meta.get_field("id")
        old_field = Tag._meta.get_field("slug")
        new_field = SlugField(primary_key=True)
        new_field.set_attributes_from_name("slug")
        new_field.model = Tag
        with connection.schema_editor() as editor:
            editor.remove_field(Tag, id_field)
            editor.alter_field(Tag, old_field, new_field)
        # Ensure the PK changed
        self.assertNotIn(
            'id',
            self.get_indexes(Tag._meta.db_table),
        )
        self.assertEqual(self.get_primary_key(Tag._meta.db_table), 'slug')

    def test_context_manager_exit(self):
        """
        Ensures transaction is correctly closed when an error occurs
        inside a SchemaEditor context.
        """
        class SomeError(Exception):
            pass
        try:
            with connection.schema_editor():
                raise SomeError
        except SomeError:
            self.assertFalse(connection.in_atomic_block)

    @skipIfDBFeature('can_rollback_ddl')
    def test_unsupported_transactional_ddl_disallowed(self):
        message = (
            "Executing DDL statements while in a transaction on databases "
            "that can't perform a rollback is prohibited."
        )
        with atomic(), connection.schema_editor() as editor:
            with self.assertRaisesMessage(TransactionManagementError, message):
                editor.execute(editor.sql_create_table % {'table': 'foo', 'definition': ''})

    @skipUnlessDBFeature('supports_foreign_keys')
    def test_foreign_key_index_long_names_regression(self):
        """
        Regression test for #21497.
        Only affects databases that support foreign keys.
        """
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(AuthorWithEvenLongerName)
            editor.create_model(BookWithLongName)
        # Find the properly shortened column name
        column_name = connection.ops.quote_name("author_foreign_key_with_really_long_field_name_id")
        column_name = column_name[1:-1].lower()  # unquote, and, for Oracle, un-upcase
        # Ensure the table is there and has an index on the column
        self.assertIn(
            column_name,
            self.get_indexes(BookWithLongName._meta.db_table),
        )
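    # Index names that would exceed the backend's max_name_length() are
    # shortened with a hash suffix by editor._create_index_name() (a private
    # helper), which is why the very long FK column above still receives a
    # usable index name.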
""" # Create the initial tables with connection.schema_editor() as editor: editor.create_model(AuthorWithEvenLongerName) editor.create_model(BookWithLongName) # Add a second FK, this would fail due to long ref name before the fix new_field = ForeignKey(AuthorWithEvenLongerName, CASCADE, related_name="something") new_field.set_attributes_from_name("author_other_really_long_named_i_mean_so_long_fk") with connection.schema_editor() as editor: editor.add_field(BookWithLongName, new_field) @isolate_apps('schema') @skipUnlessDBFeature('supports_foreign_keys') def test_add_foreign_key_quoted_db_table(self): class Author(Model): class Meta: db_table = '"table_author_double_quoted"' app_label = 'schema' class Book(Model): author = ForeignKey(Author, CASCADE) class Meta: app_label = 'schema' with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(Book) if connection.vendor == 'mysql': self.assertForeignKeyExists(Book, 'author_id', '"table_author_double_quoted"') else: self.assertForeignKeyExists(Book, 'author_id', 'table_author_double_quoted') def test_add_foreign_object(self): with connection.schema_editor() as editor: editor.create_model(BookForeignObj) new_field = ForeignObject(Author, on_delete=CASCADE, from_fields=['author_id'], to_fields=['id']) new_field.set_attributes_from_name('author') with connection.schema_editor() as editor: editor.add_field(BookForeignObj, new_field) def test_creation_deletion_reserved_names(self): """ Tries creating a model's table, and then deleting it when it has a SQL reserved name. """ # Create the table with connection.schema_editor() as editor: try: editor.create_model(Thing) except OperationalError as e: self.fail("Errors when applying initial migration for a model " "with a table named after an SQL reserved word: %s" % e) # The table is there list(Thing.objects.all()) # Clean up that table with connection.schema_editor() as editor: editor.delete_model(Thing) # The table is gone with self.assertRaises(DatabaseError): list(Thing.objects.all()) def test_remove_constraints_capital_letters(self): """ #23065 - Constraint names must be quoted if they contain capital letters. 
""" def get_field(*args, field_class=IntegerField, **kwargs): kwargs['db_column'] = "CamelCase" field = field_class(*args, **kwargs) field.set_attributes_from_name("CamelCase") return field model = Author field = get_field() table = model._meta.db_table column = field.column identifier_converter = connection.introspection.identifier_converter with connection.schema_editor() as editor: editor.create_model(model) editor.add_field(model, field) constraint_name = 'CamelCaseIndex' expected_constraint_name = identifier_converter(constraint_name) editor.execute( editor.sql_create_index % { "table": editor.quote_name(table), "name": editor.quote_name(constraint_name), "using": "", "columns": editor.quote_name(column), "extra": "", "condition": "", } ) self.assertIn(expected_constraint_name, self.get_constraints(model._meta.db_table)) editor.alter_field(model, get_field(db_index=True), field, strict=True) self.assertNotIn(expected_constraint_name, self.get_constraints(model._meta.db_table)) constraint_name = 'CamelCaseUniqConstraint' expected_constraint_name = identifier_converter(constraint_name) editor.execute(editor._create_unique_sql(model, [field.column], constraint_name)) self.assertIn(expected_constraint_name, self.get_constraints(model._meta.db_table)) editor.alter_field(model, get_field(unique=True), field, strict=True) self.assertNotIn(expected_constraint_name, self.get_constraints(model._meta.db_table)) if editor.sql_create_fk: constraint_name = 'CamelCaseFKConstraint' expected_constraint_name = identifier_converter(constraint_name) editor.execute( editor.sql_create_fk % { "table": editor.quote_name(table), "name": editor.quote_name(constraint_name), "column": editor.quote_name(column), "to_table": editor.quote_name(table), "to_column": editor.quote_name(model._meta.auto_field.column), "deferrable": connection.ops.deferrable_sql(), } ) self.assertIn(expected_constraint_name, self.get_constraints(model._meta.db_table)) editor.alter_field(model, get_field(Author, CASCADE, field_class=ForeignKey), field, strict=True) self.assertNotIn(expected_constraint_name, self.get_constraints(model._meta.db_table)) def test_add_field_use_effective_default(self): """ #23987 - effective_default() should be used as the field default when adding a new field. 
""" # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Ensure there's no surname field columns = self.column_classes(Author) self.assertNotIn("surname", columns) # Create a row Author.objects.create(name='Anonymous1') # Add new CharField to ensure default will be used from effective_default new_field = CharField(max_length=15, blank=True) new_field.set_attributes_from_name("surname") with connection.schema_editor() as editor: editor.add_field(Author, new_field) # Ensure field was added with the right default with connection.cursor() as cursor: cursor.execute("SELECT surname FROM schema_author;") item = cursor.fetchall()[0] self.assertEqual(item[0], None if connection.features.interprets_empty_strings_as_nulls else '') def test_add_field_default_dropped(self): # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Ensure there's no surname field columns = self.column_classes(Author) self.assertNotIn("surname", columns) # Create a row Author.objects.create(name='Anonymous1') # Add new CharField with a default new_field = CharField(max_length=15, blank=True, default='surname default') new_field.set_attributes_from_name("surname") with connection.schema_editor() as editor: editor.add_field(Author, new_field) # Ensure field was added with the right default with connection.cursor() as cursor: cursor.execute("SELECT surname FROM schema_author;") item = cursor.fetchall()[0] self.assertEqual(item[0], 'surname default') # And that the default is no longer set in the database. field = next( f for f in connection.introspection.get_table_description(cursor, "schema_author") if f.name == "surname" ) if connection.features.can_introspect_default: self.assertIsNone(field.default) def test_alter_field_default_dropped(self): # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Create a row Author.objects.create(name='Anonymous1') self.assertIsNone(Author.objects.get().height) old_field = Author._meta.get_field('height') # The default from the new field is used in updating existing rows. new_field = IntegerField(blank=True, default=42) new_field.set_attributes_from_name('height') with connection.schema_editor() as editor: editor.alter_field(Author, old_field, new_field, strict=True) self.assertEqual(Author.objects.get().height, 42) # The database default should be removed. with connection.cursor() as cursor: field = next( f for f in connection.introspection.get_table_description(cursor, "schema_author") if f.name == "height" ) if connection.features.can_introspect_default: self.assertIsNone(field.default) @unittest.skipIf(connection.vendor == 'sqlite', 'SQLite naively remakes the table on field alteration.') def test_alter_field_default_doesnt_perfom_queries(self): """ No queries are performed if a field default changes and the field's not changing from null to non-null. 
""" with connection.schema_editor() as editor: editor.create_model(AuthorWithDefaultHeight) old_field = AuthorWithDefaultHeight._meta.get_field('height') new_default = old_field.default * 2 new_field = PositiveIntegerField(null=True, blank=True, default=new_default) new_field.set_attributes_from_name('height') with connection.schema_editor() as editor, self.assertNumQueries(0): editor.alter_field(AuthorWithDefaultHeight, old_field, new_field, strict=True) def test_add_textfield_unhashable_default(self): # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Create a row Author.objects.create(name='Anonymous1') # Create a field that has an unhashable default new_field = TextField(default={}) new_field.set_attributes_from_name("info") with connection.schema_editor() as editor: editor.add_field(Author, new_field) @unittest.skipUnless(connection.vendor == 'postgresql', "PostgreSQL specific") def test_add_indexed_charfield(self): field = CharField(max_length=255, db_index=True) field.set_attributes_from_name('nom_de_plume') with connection.schema_editor() as editor: editor.create_model(Author) editor.add_field(Author, field) # Should create two indexes; one for like operator. self.assertEqual( self.get_constraints_for_column(Author, 'nom_de_plume'), ['schema_author_nom_de_plume_7570a851', 'schema_author_nom_de_plume_7570a851_like'], ) @unittest.skipUnless(connection.vendor == 'postgresql', "PostgreSQL specific") def test_add_unique_charfield(self): field = CharField(max_length=255, unique=True) field.set_attributes_from_name('nom_de_plume') with connection.schema_editor() as editor: editor.create_model(Author) editor.add_field(Author, field) # Should create two indexes; one for like operator. self.assertEqual( self.get_constraints_for_column(Author, 'nom_de_plume'), ['schema_author_nom_de_plume_7570a851_like', 'schema_author_nom_de_plume_key'] ) @unittest.skipUnless(connection.vendor == 'postgresql', "PostgreSQL specific") def test_alter_field_add_index_to_charfield(self): # Create the table and verify no initial indexes. with connection.schema_editor() as editor: editor.create_model(Author) self.assertEqual(self.get_constraints_for_column(Author, 'name'), []) # Alter to add db_index=True and create 2 indexes. old_field = Author._meta.get_field('name') new_field = CharField(max_length=255, db_index=True) new_field.set_attributes_from_name('name') with connection.schema_editor() as editor: editor.alter_field(Author, old_field, new_field, strict=True) self.assertEqual( self.get_constraints_for_column(Author, 'name'), ['schema_author_name_1fbc5617', 'schema_author_name_1fbc5617_like'] ) # Remove db_index=True to drop both indexes. with connection.schema_editor() as editor: editor.alter_field(Author, new_field, old_field, strict=True) self.assertEqual(self.get_constraints_for_column(Author, 'name'), []) @unittest.skipUnless(connection.vendor == 'postgresql', "PostgreSQL specific") def test_alter_field_add_unique_to_charfield(self): # Create the table and verify no initial indexes. with connection.schema_editor() as editor: editor.create_model(Author) self.assertEqual(self.get_constraints_for_column(Author, 'name'), []) # Alter to add unique=True and create 2 indexes. 
        old_field = Author._meta.get_field('name')
        new_field = CharField(max_length=255, unique=True)
        new_field.set_attributes_from_name('name')
        with connection.schema_editor() as editor:
            editor.alter_field(Author, old_field, new_field, strict=True)
        self.assertEqual(
            self.get_constraints_for_column(Author, 'name'),
            ['schema_author_name_1fbc5617_like', 'schema_author_name_1fbc5617_uniq'],
        )
        # Remove unique=True to drop both indexes.
        with connection.schema_editor() as editor:
            editor.alter_field(Author, new_field, old_field, strict=True)
        self.assertEqual(self.get_constraints_for_column(Author, 'name'), [])

    @unittest.skipUnless(connection.vendor == 'postgresql', "PostgreSQL specific")
    def test_alter_field_add_index_to_textfield(self):
        # Create the table and verify no initial indexes.
        with connection.schema_editor() as editor:
            editor.create_model(Note)
        self.assertEqual(self.get_constraints_for_column(Note, 'info'), [])
        # Alter to add db_index=True and create 2 indexes.
        old_field = Note._meta.get_field('info')
        new_field = TextField(db_index=True)
        new_field.set_attributes_from_name('info')
        with connection.schema_editor() as editor:
            editor.alter_field(Note, old_field, new_field, strict=True)
        self.assertEqual(
            self.get_constraints_for_column(Note, 'info'),
            ['schema_note_info_4b0ea695', 'schema_note_info_4b0ea695_like'],
        )
        # Remove db_index=True to drop both indexes.
        with connection.schema_editor() as editor:
            editor.alter_field(Note, new_field, old_field, strict=True)
        self.assertEqual(self.get_constraints_for_column(Note, 'info'), [])

    @unittest.skipUnless(connection.vendor == 'postgresql', "PostgreSQL specific")
    def test_alter_field_add_unique_to_charfield_with_db_index(self):
        # Create the table and verify initial indexes.
        with connection.schema_editor() as editor:
            editor.create_model(BookWithoutAuthor)
        self.assertEqual(
            self.get_constraints_for_column(BookWithoutAuthor, 'title'),
            ['schema_book_title_2dfb2dff', 'schema_book_title_2dfb2dff_like'],
        )
        # Alter to add unique=True (should replace the index)
        old_field = BookWithoutAuthor._meta.get_field('title')
        new_field = CharField(max_length=100, db_index=True, unique=True)
        new_field.set_attributes_from_name('title')
        with connection.schema_editor() as editor:
            editor.alter_field(BookWithoutAuthor, old_field, new_field, strict=True)
        self.assertEqual(
            self.get_constraints_for_column(BookWithoutAuthor, 'title'),
            ['schema_book_title_2dfb2dff_like', 'schema_book_title_2dfb2dff_uniq'],
        )
        # Alter to remove unique=True (should drop unique index)
        new_field2 = CharField(max_length=100, db_index=True)
        new_field2.set_attributes_from_name('title')
        with connection.schema_editor() as editor:
            editor.alter_field(BookWithoutAuthor, new_field, new_field2, strict=True)
        self.assertEqual(
            self.get_constraints_for_column(BookWithoutAuthor, 'title'),
            ['schema_book_title_2dfb2dff', 'schema_book_title_2dfb2dff_like'],
        )

    @unittest.skipUnless(connection.vendor == 'postgresql', "PostgreSQL specific")
    def test_alter_field_remove_unique_and_db_index_from_charfield(self):
        # Create the table and verify initial indexes.
        with connection.schema_editor() as editor:
            editor.create_model(BookWithoutAuthor)
        self.assertEqual(
            self.get_constraints_for_column(BookWithoutAuthor, 'title'),
            ['schema_book_title_2dfb2dff', 'schema_book_title_2dfb2dff_like'],
        )
        # Alter to add unique=True (should replace the index)
        old_field = BookWithoutAuthor._meta.get_field('title')
        new_field = CharField(max_length=100, db_index=True, unique=True)
        new_field.set_attributes_from_name('title')
        with connection.schema_editor() as editor:
            editor.alter_field(BookWithoutAuthor, old_field, new_field, strict=True)
        self.assertEqual(
            self.get_constraints_for_column(BookWithoutAuthor, 'title'),
            ['schema_book_title_2dfb2dff_like', 'schema_book_title_2dfb2dff_uniq'],
        )
        # Alter to remove both unique=True and db_index=True (should drop all indexes)
        new_field2 = CharField(max_length=100)
        new_field2.set_attributes_from_name('title')
        with connection.schema_editor() as editor:
            editor.alter_field(BookWithoutAuthor, new_field, new_field2, strict=True)
        self.assertEqual(self.get_constraints_for_column(BookWithoutAuthor, 'title'), [])

    @unittest.skipUnless(connection.vendor == 'postgresql', "PostgreSQL specific")
    def test_alter_field_swap_unique_and_db_index_with_charfield(self):
        # Create the table and verify initial indexes.
        with connection.schema_editor() as editor:
            editor.create_model(BookWithoutAuthor)
        self.assertEqual(
            self.get_constraints_for_column(BookWithoutAuthor, 'title'),
            ['schema_book_title_2dfb2dff', 'schema_book_title_2dfb2dff_like'],
        )
        # Alter to set unique=True and remove db_index=True (should replace the index)
        old_field = BookWithoutAuthor._meta.get_field('title')
        new_field = CharField(max_length=100, unique=True)
        new_field.set_attributes_from_name('title')
        with connection.schema_editor() as editor:
            editor.alter_field(BookWithoutAuthor, old_field, new_field, strict=True)
        self.assertEqual(
            self.get_constraints_for_column(BookWithoutAuthor, 'title'),
            ['schema_book_title_2dfb2dff_like', 'schema_book_title_2dfb2dff_uniq'],
        )
        # Alter to set db_index=True and remove unique=True (should restore index)
        new_field2 = CharField(max_length=100, db_index=True)
        new_field2.set_attributes_from_name('title')
        with connection.schema_editor() as editor:
            editor.alter_field(BookWithoutAuthor, new_field, new_field2, strict=True)
        self.assertEqual(
            self.get_constraints_for_column(BookWithoutAuthor, 'title'),
            ['schema_book_title_2dfb2dff', 'schema_book_title_2dfb2dff_like'],
        )

    @unittest.skipUnless(connection.vendor == 'postgresql', "PostgreSQL specific")
    def test_alter_field_add_db_index_to_charfield_with_unique(self):
        # Create the table and verify initial indexes.
        with connection.schema_editor() as editor:
            editor.create_model(Tag)
        self.assertEqual(
            self.get_constraints_for_column(Tag, 'slug'),
            ['schema_tag_slug_2c418ba3_like', 'schema_tag_slug_key'],
        )
        # Alter to add db_index=True
        old_field = Tag._meta.get_field('slug')
        new_field = SlugField(db_index=True, unique=True)
        new_field.set_attributes_from_name('slug')
        with connection.schema_editor() as editor:
            editor.alter_field(Tag, old_field, new_field, strict=True)
        self.assertEqual(
            self.get_constraints_for_column(Tag, 'slug'),
            ['schema_tag_slug_2c418ba3_like', 'schema_tag_slug_key'],
        )
        # Alter to remove db_index=True
        new_field2 = SlugField(unique=True)
        new_field2.set_attributes_from_name('slug')
        with connection.schema_editor() as editor:
            editor.alter_field(Tag, new_field, new_field2, strict=True)
        self.assertEqual(
            self.get_constraints_for_column(Tag, 'slug'),
            ['schema_tag_slug_2c418ba3_like', 'schema_tag_slug_key'],
        )

    def test_alter_field_add_index_to_integerfield(self):
        # Create the table and verify no initial indexes.
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        self.assertEqual(self.get_constraints_for_column(Author, 'weight'), [])
        # Alter to add db_index=True and create index.
        old_field = Author._meta.get_field('weight')
        new_field = IntegerField(null=True, db_index=True)
        new_field.set_attributes_from_name('weight')
        with connection.schema_editor() as editor:
            editor.alter_field(Author, old_field, new_field, strict=True)
        self.assertEqual(self.get_constraints_for_column(Author, 'weight'), ['schema_author_weight_587740f9'])
        # Remove db_index=True to drop index.
        with connection.schema_editor() as editor:
            editor.alter_field(Author, new_field, old_field, strict=True)
        self.assertEqual(self.get_constraints_for_column(Author, 'weight'), [])

    def test_alter_pk_with_self_referential_field(self):
        """
        Changing the primary key field name of a model with a self-referential
        foreign key (#26384).
        """
        with connection.schema_editor() as editor:
            editor.create_model(Node)
        old_field = Node._meta.get_field('node_id')
        new_field = AutoField(primary_key=True)
        new_field.set_attributes_from_name('id')
        with connection.schema_editor() as editor:
            editor.alter_field(Node, old_field, new_field, strict=True)
        self.assertForeignKeyExists(Node, 'parent_id', Node._meta.db_table)
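    # The self-referential FK keeps pointing at the renamed PK because
    # alter_field() repoints incoming references as part of the column
    # change; the final assertForeignKeyExists() confirms parent_id still
    # targets Node's own table.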
""" now = datetime.datetime(month=1, day=1, year=2000, hour=1, minute=1) now_tz = datetime.datetime(month=1, day=1, year=2000, hour=1, minute=1, tzinfo=timezone.utc) mocked_datetime.now = mock.MagicMock(return_value=now) mocked_tz.now = mock.MagicMock(return_value=now_tz) # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Check auto_now/auto_now_add attributes are not defined columns = self.column_classes(Author) self.assertNotIn("dob_auto_now", columns) self.assertNotIn("dob_auto_now_add", columns) self.assertNotIn("dtob_auto_now", columns) self.assertNotIn("dtob_auto_now_add", columns) self.assertNotIn("tob_auto_now", columns) self.assertNotIn("tob_auto_now_add", columns) # Create a row Author.objects.create(name='Anonymous1') # Ensure fields were added with the correct defaults dob_auto_now = DateField(auto_now=True) dob_auto_now.set_attributes_from_name('dob_auto_now') self.check_added_field_default( editor, Author, dob_auto_now, 'dob_auto_now', now.date(), cast_function=lambda x: x.date(), ) dob_auto_now_add = DateField(auto_now_add=True) dob_auto_now_add.set_attributes_from_name('dob_auto_now_add') self.check_added_field_default( editor, Author, dob_auto_now_add, 'dob_auto_now_add', now.date(), cast_function=lambda x: x.date(), ) dtob_auto_now = DateTimeField(auto_now=True) dtob_auto_now.set_attributes_from_name('dtob_auto_now') self.check_added_field_default( editor, Author, dtob_auto_now, 'dtob_auto_now', now, ) dt_tm_of_birth_auto_now_add = DateTimeField(auto_now_add=True) dt_tm_of_birth_auto_now_add.set_attributes_from_name('dtob_auto_now_add') self.check_added_field_default( editor, Author, dt_tm_of_birth_auto_now_add, 'dtob_auto_now_add', now, ) tob_auto_now = TimeField(auto_now=True) tob_auto_now.set_attributes_from_name('tob_auto_now') self.check_added_field_default( editor, Author, tob_auto_now, 'tob_auto_now', now.time(), cast_function=lambda x: x.time(), ) tob_auto_now_add = TimeField(auto_now_add=True) tob_auto_now_add.set_attributes_from_name('tob_auto_now_add') self.check_added_field_default( editor, Author, tob_auto_now_add, 'tob_auto_now_add', now.time(), cast_function=lambda x: x.time(), ) def test_namespaced_db_table_create_index_name(self): """ Table names are stripped of their namespace/schema before being used to generate index names. 
""" with connection.schema_editor() as editor: max_name_length = connection.ops.max_name_length() or 200 namespace = 'n' * max_name_length table_name = 't' * max_name_length namespaced_table_name = '"%s"."%s"' % (namespace, table_name) self.assertEqual( editor._create_index_name(table_name, []), editor._create_index_name(namespaced_table_name, []), ) @unittest.skipUnless(connection.vendor == 'oracle', 'Oracle specific db_table syntax') def test_creation_with_db_table_double_quotes(self): oracle_user = connection.creation._test_database_user() class Student(Model): name = CharField(max_length=30) class Meta: app_label = 'schema' apps = new_apps db_table = '"%s"."DJANGO_STUDENT_TABLE"' % oracle_user class Document(Model): name = CharField(max_length=30) students = ManyToManyField(Student) class Meta: app_label = 'schema' apps = new_apps db_table = '"%s"."DJANGO_DOCUMENT_TABLE"' % oracle_user self.local_models = [Student, Document] with connection.schema_editor() as editor: editor.create_model(Student) editor.create_model(Document) doc = Document.objects.create(name='Test Name') student = Student.objects.create(name='Some man') doc.students.add(student) def test_rename_table_renames_deferred_sql_references(self): atomic_rename = connection.features.supports_atomic_references_rename with connection.schema_editor(atomic=atomic_rename) as editor: editor.create_model(Author) editor.create_model(Book) editor.alter_db_table(Author, 'schema_author', 'schema_renamed_author') editor.alter_db_table(Author, 'schema_book', 'schema_renamed_book') self.assertGreater(len(editor.deferred_sql), 0) for statement in editor.deferred_sql: self.assertIs(statement.references_table('schema_author'), False) self.assertIs(statement.references_table('schema_book'), False) @unittest.skipIf(connection.vendor == 'sqlite', 'SQLite naively remakes the table on field alteration.') def test_rename_column_renames_deferred_sql_references(self): with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(Book) old_title = Book._meta.get_field('title') new_title = CharField(max_length=100, db_index=True) new_title.set_attributes_from_name('renamed_title') editor.alter_field(Book, old_title, new_title) old_author = Book._meta.get_field('author') new_author = ForeignKey(Author, CASCADE) new_author.set_attributes_from_name('renamed_author') editor.alter_field(Book, old_author, new_author) self.assertGreater(len(editor.deferred_sql), 0) for statement in editor.deferred_sql: self.assertIs(statement.references_column('book', 'title'), False) self.assertIs(statement.references_column('book', 'author_id'), False) @isolate_apps('schema') def test_referenced_field_without_constraint_rename_inside_atomic_block(self): """ Foreign keys without database level constraint don't prevent the field they reference from being renamed in an atomic block. 
""" class Foo(Model): field = CharField(max_length=255, unique=True) class Meta: app_label = 'schema' class Bar(Model): foo = ForeignKey(Foo, CASCADE, to_field='field', db_constraint=False) class Meta: app_label = 'schema' self.isolated_local_models = [Foo, Bar] with connection.schema_editor() as editor: editor.create_model(Foo) editor.create_model(Bar) new_field = CharField(max_length=255, unique=True) new_field.set_attributes_from_name('renamed') with connection.schema_editor(atomic=True) as editor: editor.alter_field(Foo, Foo._meta.get_field('field'), new_field) @isolate_apps('schema') def test_referenced_table_without_constraint_rename_inside_atomic_block(self): """ Foreign keys without database level constraint don't prevent the table they reference from being renamed in an atomic block. """ class Foo(Model): field = CharField(max_length=255, unique=True) class Meta: app_label = 'schema' class Bar(Model): foo = ForeignKey(Foo, CASCADE, to_field='field', db_constraint=False) class Meta: app_label = 'schema' self.isolated_local_models = [Foo, Bar] with connection.schema_editor() as editor: editor.create_model(Foo) editor.create_model(Bar) new_field = CharField(max_length=255, unique=True) new_field.set_attributes_from_name('renamed') with connection.schema_editor(atomic=True) as editor: editor.alter_db_table(Foo, Foo._meta.db_table, 'renamed_table') Foo._meta.db_table = 'renamed_table'
from django.core import checks from django.core.checks import Error from django.db import models from django.test import SimpleTestCase, TestCase, skipUnlessDBFeature from django.test.utils import ( isolate_apps, modify_settings, override_system_checks, ) @isolate_apps('check_framework', attr_name='apps') @override_system_checks([checks.model_checks.check_all_models]) class DuplicateDBTableTests(SimpleTestCase): def test_collision_in_same_app(self): class Model1(models.Model): class Meta: db_table = 'test_table' class Model2(models.Model): class Meta: db_table = 'test_table' self.assertEqual(checks.run_checks(app_configs=self.apps.get_app_configs()), [ Error( "db_table 'test_table' is used by multiple models: " "check_framework.Model1, check_framework.Model2.", obj='test_table', id='models.E028', ) ]) @modify_settings(INSTALLED_APPS={'append': 'basic'}) @isolate_apps('basic', 'check_framework', kwarg_name='apps') def test_collision_across_apps(self, apps): class Model1(models.Model): class Meta: app_label = 'basic' db_table = 'test_table' class Model2(models.Model): class Meta: app_label = 'check_framework' db_table = 'test_table' self.assertEqual(checks.run_checks(app_configs=apps.get_app_configs()), [ Error( "db_table 'test_table' is used by multiple models: " "basic.Model1, check_framework.Model2.", obj='test_table', id='models.E028', ) ]) def test_no_collision_for_unmanaged_models(self): class Unmanaged(models.Model): class Meta: db_table = 'test_table' managed = False class Managed(models.Model): class Meta: db_table = 'test_table' self.assertEqual(checks.run_checks(app_configs=self.apps.get_app_configs()), []) def test_no_collision_for_proxy_models(self): class Model(models.Model): class Meta: db_table = 'test_table' class ProxyModel(Model): class Meta: proxy = True self.assertEqual(Model._meta.db_table, ProxyModel._meta.db_table) self.assertEqual(checks.run_checks(app_configs=self.apps.get_app_configs()), []) @isolate_apps('check_framework', attr_name='apps') @override_system_checks([checks.model_checks.check_all_models]) class IndexNameTests(SimpleTestCase): def test_collision_in_same_model(self): index = models.Index(fields=['id'], name='foo') class Model(models.Model): class Meta: indexes = [index, index] self.assertEqual(checks.run_checks(app_configs=self.apps.get_app_configs()), [ Error( "index name 'foo' is not unique for model check_framework.Model.", id='models.E029', ), ]) def test_collision_in_different_models(self): index = models.Index(fields=['id'], name='foo') class Model1(models.Model): class Meta: indexes = [index] class Model2(models.Model): class Meta: indexes = [index] self.assertEqual(checks.run_checks(app_configs=self.apps.get_app_configs()), [ Error( "index name 'foo' is not unique amongst models: " "check_framework.Model1, check_framework.Model2.", id='models.E030', ), ]) def test_collision_abstract_model(self): class AbstractModel(models.Model): class Meta: indexes = [models.Index(fields=['id'], name='foo')] abstract = True class Model1(AbstractModel): pass class Model2(AbstractModel): pass self.assertEqual(checks.run_checks(app_configs=self.apps.get_app_configs()), [ Error( "index name 'foo' is not unique amongst models: " "check_framework.Model1, check_framework.Model2.", id='models.E030', ), ]) def test_no_collision_abstract_model_interpolation(self): class AbstractModel(models.Model): name = models.CharField(max_length=20) class Meta: indexes = [models.Index(fields=['name'], name='%(app_label)s_%(class)s_foo')] abstract = True class Model1(AbstractModel): 
pass class Model2(AbstractModel): pass self.assertEqual(checks.run_checks(app_configs=self.apps.get_app_configs()), []) @modify_settings(INSTALLED_APPS={'append': 'basic'}) @isolate_apps('basic', 'check_framework', kwarg_name='apps') def test_collision_across_apps(self, apps): index = models.Index(fields=['id'], name='foo') class Model1(models.Model): class Meta: app_label = 'basic' indexes = [index] class Model2(models.Model): class Meta: app_label = 'check_framework' indexes = [index] self.assertEqual(checks.run_checks(app_configs=apps.get_app_configs()), [ Error( "index name 'foo' is not unique amongst models: basic.Model1, " "check_framework.Model2.", id='models.E030', ), ]) @modify_settings(INSTALLED_APPS={'append': 'basic'}) @isolate_apps('basic', 'check_framework', kwarg_name='apps') def test_no_collision_across_apps_interpolation(self, apps): index = models.Index(fields=['id'], name='%(app_label)s_%(class)s_foo') class Model1(models.Model): class Meta: app_label = 'basic' indexes = [index] class Model2(models.Model): class Meta: app_label = 'check_framework' indexes = [index] self.assertEqual(checks.run_checks(app_configs=apps.get_app_configs()), []) @isolate_apps('check_framework', attr_name='apps') @override_system_checks([checks.model_checks.check_all_models]) @skipUnlessDBFeature('supports_table_check_constraints') class ConstraintNameTests(TestCase): def test_collision_in_same_model(self): class Model(models.Model): class Meta: constraints = [ models.CheckConstraint(check=models.Q(id__gt=0), name='foo'), models.CheckConstraint(check=models.Q(id__lt=100), name='foo'), ] self.assertEqual(checks.run_checks(app_configs=self.apps.get_app_configs()), [ Error( "constraint name 'foo' is not unique for model " "check_framework.Model.", id='models.E031', ), ]) def test_collision_in_different_models(self): constraint = models.CheckConstraint(check=models.Q(id__gt=0), name='foo') class Model1(models.Model): class Meta: constraints = [constraint] class Model2(models.Model): class Meta: constraints = [constraint] self.assertEqual(checks.run_checks(app_configs=self.apps.get_app_configs()), [ Error( "constraint name 'foo' is not unique amongst models: " "check_framework.Model1, check_framework.Model2.", id='models.E032', ), ]) def test_collision_abstract_model(self): class AbstractModel(models.Model): class Meta: constraints = [models.CheckConstraint(check=models.Q(id__gt=0), name='foo')] abstract = True class Model1(AbstractModel): pass class Model2(AbstractModel): pass self.assertEqual(checks.run_checks(app_configs=self.apps.get_app_configs()), [ Error( "constraint name 'foo' is not unique amongst models: " "check_framework.Model1, check_framework.Model2.", id='models.E032', ), ]) def test_no_collision_abstract_model_interpolation(self): class AbstractModel(models.Model): class Meta: constraints = [ models.CheckConstraint(check=models.Q(id__gt=0), name='%(app_label)s_%(class)s_foo'), ] abstract = True class Model1(AbstractModel): pass class Model2(AbstractModel): pass self.assertEqual(checks.run_checks(app_configs=self.apps.get_app_configs()), []) @modify_settings(INSTALLED_APPS={'append': 'basic'}) @isolate_apps('basic', 'check_framework', kwarg_name='apps') def test_collision_across_apps(self, apps): constraint = models.CheckConstraint(check=models.Q(id__gt=0), name='foo') class Model1(models.Model): class Meta: app_label = 'basic' constraints = [constraint] class Model2(models.Model): class Meta: app_label = 'check_framework' constraints = [constraint]
self.assertEqual(checks.run_checks(app_configs=apps.get_app_configs()), [ Error( "constraint name 'foo' is not unique amongst models: " "basic.Model1, check_framework.Model2.", id='models.E032', ), ]) @modify_settings(INSTALLED_APPS={'append': 'basic'}) @isolate_apps('basic', 'check_framework', kwarg_name='apps') def test_no_collision_across_apps_interpolation(self, apps): constraint = models.CheckConstraint(check=models.Q(id__gt=0), name='%(app_label)s_%(class)s_foo') class Model1(models.Model): class Meta: app_label = 'basic' constraints = [constraint] class Model2(models.Model): class Meta: app_label = 'check_framework' constraints = [constraint] self.assertEqual(checks.run_checks(app_configs=apps.get_app_configs()), [])
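# All of the "*_interpolation" cases above rely on the same mechanism:
# '%(app_label)s' and '%(class)s' placeholders in an index or constraint
# name are resolved per concrete model, so an abstract base can ship one
# definition without tripping models.E030/E032. A minimal sketch with
# hypothetical model names:
from django.db import models

class AbstractBase(models.Model):
    class Meta:
        abstract = True
        constraints = [
            models.CheckConstraint(
                check=models.Q(id__gt=0),
                # Interpolated per concrete subclass, so the two models
                # below get distinct constraint names.
                name='%(app_label)s_%(class)s_positive_id',
            ),
        ]

class ConcreteA(AbstractBase):
    pass  # constraint name: '<app_label>_concretea_positive_id'

class ConcreteB(AbstractBase):
    pass  # constraint name: '<app_label>_concreteb_positive_id'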
import unittest from django.conf import settings from django.core.checks import Error, Warning from django.core.checks.model_checks import _check_lazy_references from django.core.exceptions import ImproperlyConfigured from django.db import connection, connections, models from django.db.models.functions import Lower from django.db.models.signals import post_init from django.test import SimpleTestCase from django.test.utils import isolate_apps, override_settings, register_lookup def get_max_column_name_length(): allowed_len = None db_alias = None for db in settings.DATABASES: connection = connections[db] max_name_length = connection.ops.max_name_length() if max_name_length is not None and not connection.features.truncates_names: if allowed_len is None or max_name_length < allowed_len: allowed_len = max_name_length db_alias = db return (allowed_len, db_alias) @isolate_apps('invalid_models_tests') class IndexTogetherTests(SimpleTestCase): def test_non_iterable(self): class Model(models.Model): class Meta: index_together = 42 self.assertEqual(Model.check(), [ Error( "'index_together' must be a list or tuple.", obj=Model, id='models.E008', ), ]) def test_non_list(self): class Model(models.Model): class Meta: index_together = 'not-a-list' self.assertEqual(Model.check(), [ Error( "'index_together' must be a list or tuple.", obj=Model, id='models.E008', ), ]) def test_list_containing_non_iterable(self): class Model(models.Model): class Meta: index_together = [('a', 'b'), 42] self.assertEqual(Model.check(), [ Error( "All 'index_together' elements must be lists or tuples.", obj=Model, id='models.E009', ), ]) def test_pointing_to_missing_field(self): class Model(models.Model): class Meta: index_together = [['missing_field']] self.assertEqual(Model.check(), [ Error( "'index_together' refers to the nonexistent field 'missing_field'.", obj=Model, id='models.E012', ), ]) def test_pointing_to_non_local_field(self): class Foo(models.Model): field1 = models.IntegerField() class Bar(Foo): field2 = models.IntegerField() class Meta: index_together = [['field2', 'field1']] self.assertEqual(Bar.check(), [ Error( "'index_together' refers to field 'field1' which is not " "local to model 'Bar'.", hint='This issue may be caused by multi-table inheritance.', obj=Bar, id='models.E016', ), ]) def test_pointing_to_m2m_field(self): class Model(models.Model): m2m = models.ManyToManyField('self') class Meta: index_together = [['m2m']] self.assertEqual(Model.check(), [ Error( "'index_together' refers to a ManyToManyField 'm2m', but " "ManyToManyFields are not permitted in 'index_together'.", obj=Model, id='models.E013', ), ]) def test_pointing_to_fk(self): class Foo(models.Model): pass class Bar(models.Model): foo_1 = models.ForeignKey(Foo, on_delete=models.CASCADE, related_name='bar_1') foo_2 = models.ForeignKey(Foo, on_delete=models.CASCADE, related_name='bar_2') class Meta: index_together = [['foo_1_id', 'foo_2']] self.assertEqual(Bar.check(), []) # unique_together tests are very similar to index_together tests. 
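# For contrast with the failure cases above, here is a declaration that
# passes all of these checks (hypothetical models; both referenced fields
# are local, concrete, and not many-to-many):
from django.db import models

class Customer(models.Model):
    name = models.CharField(max_length=100)

class Order(models.Model):
    customer = models.ForeignKey(Customer, models.CASCADE)
    created = models.DateTimeField()

    class Meta:
        # A list of tuples of local field names: none of models.E008-E016
        # apply, so Order.check() reports no errors for this option.
        index_together = [('customer', 'created')]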
@isolate_apps('invalid_models_tests') class UniqueTogetherTests(SimpleTestCase): def test_non_iterable(self): class Model(models.Model): class Meta: unique_together = 42 self.assertEqual(Model.check(), [ Error( "'unique_together' must be a list or tuple.", obj=Model, id='models.E010', ), ]) def test_list_containing_non_iterable(self): class Model(models.Model): one = models.IntegerField() two = models.IntegerField() class Meta: unique_together = [('a', 'b'), 42] self.assertEqual(Model.check(), [ Error( "All 'unique_together' elements must be lists or tuples.", obj=Model, id='models.E011', ), ]) def test_non_list(self): class Model(models.Model): class Meta: unique_together = 'not-a-list' self.assertEqual(Model.check(), [ Error( "'unique_together' must be a list or tuple.", obj=Model, id='models.E010', ), ]) def test_valid_model(self): class Model(models.Model): one = models.IntegerField() two = models.IntegerField() class Meta: # unique_together can be a simple tuple unique_together = ('one', 'two') self.assertEqual(Model.check(), []) def test_pointing_to_missing_field(self): class Model(models.Model): class Meta: unique_together = [['missing_field']] self.assertEqual(Model.check(), [ Error( "'unique_together' refers to the nonexistent field 'missing_field'.", obj=Model, id='models.E012', ), ]) def test_pointing_to_m2m(self): class Model(models.Model): m2m = models.ManyToManyField('self') class Meta: unique_together = [['m2m']] self.assertEqual(Model.check(), [ Error( "'unique_together' refers to a ManyToManyField 'm2m', but " "ManyToManyFields are not permitted in 'unique_together'.", obj=Model, id='models.E013', ), ]) def test_pointing_to_fk(self): class Foo(models.Model): pass class Bar(models.Model): foo_1 = models.ForeignKey(Foo, on_delete=models.CASCADE, related_name='bar_1') foo_2 = models.ForeignKey(Foo, on_delete=models.CASCADE, related_name='bar_2') class Meta: unique_together = [['foo_1_id', 'foo_2']] self.assertEqual(Bar.check(), []) @isolate_apps('invalid_models_tests') class IndexesTests(SimpleTestCase): def test_pointing_to_missing_field(self): class Model(models.Model): class Meta: indexes = [models.Index(fields=['missing_field'], name='name')] self.assertEqual(Model.check(), [ Error( "'indexes' refers to the nonexistent field 'missing_field'.", obj=Model, id='models.E012', ), ]) def test_pointing_to_m2m_field(self): class Model(models.Model): m2m = models.ManyToManyField('self') class Meta: indexes = [models.Index(fields=['m2m'], name='name')] self.assertEqual(Model.check(), [ Error( "'indexes' refers to a ManyToManyField 'm2m', but " "ManyToManyFields are not permitted in 'indexes'.", obj=Model, id='models.E013', ), ]) def test_pointing_to_non_local_field(self): class Foo(models.Model): field1 = models.IntegerField() class Bar(Foo): field2 = models.IntegerField() class Meta: indexes = [models.Index(fields=['field2', 'field1'], name='name')] self.assertEqual(Bar.check(), [ Error( "'indexes' refers to field 'field1' which is not local to " "model 'Bar'.", hint='This issue may be caused by multi-table inheritance.', obj=Bar, id='models.E016', ), ]) def test_pointing_to_fk(self): class Foo(models.Model): pass class Bar(models.Model): foo_1 = models.ForeignKey(Foo, on_delete=models.CASCADE, related_name='bar_1') foo_2 = models.ForeignKey(Foo, on_delete=models.CASCADE, related_name='bar_2') class Meta: indexes = [models.Index(fields=['foo_1_id', 'foo_2'], name='index_name')] self.assertEqual(Bar.check(), []) def test_name_constraints(self): class Model(models.Model): class 
Meta: indexes = [ models.Index(fields=['id'], name='_index_name'), models.Index(fields=['id'], name='5index_name'), ] self.assertEqual(Model.check(), [ Error( "The index name '%sindex_name' cannot start with an " "underscore or a number." % prefix, obj=Model, id='models.E033', ) for prefix in ('_', '5') ]) def test_max_name_length(self): index_name = 'x' * 31 class Model(models.Model): class Meta: indexes = [models.Index(fields=['id'], name=index_name)] self.assertEqual(Model.check(), [ Error( "The index name '%s' cannot be longer than 30 characters." % index_name, obj=Model, id='models.E034', ), ]) @isolate_apps('invalid_models_tests') class FieldNamesTests(SimpleTestCase): def test_ending_with_underscore(self): class Model(models.Model): field_ = models.CharField(max_length=10) m2m_ = models.ManyToManyField('self') self.assertEqual(Model.check(), [ Error( 'Field names must not end with an underscore.', obj=Model._meta.get_field('field_'), id='fields.E001', ), Error( 'Field names must not end with an underscore.', obj=Model._meta.get_field('m2m_'), id='fields.E001', ), ]) max_column_name_length, column_limit_db_alias = get_max_column_name_length() @unittest.skipIf(max_column_name_length is None, "The database doesn't have a column name length limit.") def test_M2M_long_column_name(self): """ #13711 -- Model check for long M2M column names when database has column name length limits. """ allowed_len, db_alias = get_max_column_name_length() # A model with very long name which will be used to set relations to. class VeryLongModelNamezzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz(models.Model): title = models.CharField(max_length=11) # Main model for which checks will be performed. class ModelWithLongField(models.Model): m2m_field = models.ManyToManyField( VeryLongModelNamezzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz, related_name='rn1', ) m2m_field2 = models.ManyToManyField( VeryLongModelNamezzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz, related_name='rn2', through='m2msimple', ) m2m_field3 = models.ManyToManyField( VeryLongModelNamezzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz, related_name='rn3', through='m2mcomplex', ) fk = models.ForeignKey( VeryLongModelNamezzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz, models.CASCADE, related_name='rn4', ) # Models used for setting `through` in M2M field. class m2msimple(models.Model): id2 = models.ForeignKey(ModelWithLongField, models.CASCADE) class m2mcomplex(models.Model): id2 = models.ForeignKey(ModelWithLongField, models.CASCADE) long_field_name = 'a' * (self.max_column_name_length + 1) models.ForeignKey( VeryLongModelNamezzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz, models.CASCADE, ).contribute_to_class(m2msimple, long_field_name) models.ForeignKey( VeryLongModelNamezzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz, models.CASCADE, db_column=long_field_name ).contribute_to_class(m2mcomplex, long_field_name) errors = ModelWithLongField.check() # First error because of M2M field set on the model with long name. m2m_long_name = "verylongmodelnamezzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz_id" if self.max_column_name_length > len(m2m_long_name): # Some databases support names longer than the test name. expected = [] else: expected = [ Error( 'Autogenerated column name too long for M2M field "%s". ' 'Maximum length is "%s" for database "%s".' 
% (m2m_long_name, self.max_column_name_length, self.column_limit_db_alias), hint="Use 'through' to create a separate model for " "M2M and then set column_name using 'db_column'.", obj=ModelWithLongField, id='models.E019', ) ] # Second error because the FK specified in the `through` model # `m2msimple` has auto-generated name longer than allowed. # There will be no check errors in the other M2M because it # specifies db_column for the FK in `through` model even if the actual # name is longer than the limits of the database. expected.append( Error( 'Autogenerated column name too long for M2M field "%s_id". ' 'Maximum length is "%s" for database "%s".' % (long_field_name, self.max_column_name_length, self.column_limit_db_alias), hint="Use 'through' to create a separate model for " "M2M and then set column_name using 'db_column'.", obj=ModelWithLongField, id='models.E019', ) ) self.assertEqual(errors, expected) @unittest.skipIf(max_column_name_length is None, "The database doesn't have a column name length limit.") def test_local_field_long_column_name(self): """ #13711 -- Model check for long column names when database does not support long names. """ allowed_len, db_alias = get_max_column_name_length() class ModelWithLongField(models.Model): title = models.CharField(max_length=11) long_field_name = 'a' * (self.max_column_name_length + 1) long_field_name2 = 'b' * (self.max_column_name_length + 1) models.CharField(max_length=11).contribute_to_class(ModelWithLongField, long_field_name) models.CharField(max_length=11, db_column='vlmn').contribute_to_class(ModelWithLongField, long_field_name2) self.assertEqual(ModelWithLongField.check(), [ Error( 'Autogenerated column name too long for field "%s". ' 'Maximum length is "%s" for database "%s".' % (long_field_name, self.max_column_name_length, self.column_limit_db_alias), hint="Set the column name manually using 'db_column'.", obj=ModelWithLongField, id='models.E018', ) ]) def test_including_separator(self): class Model(models.Model): some__field = models.IntegerField() self.assertEqual(Model.check(), [ Error( 'Field names must not contain "__".', obj=Model._meta.get_field('some__field'), id='fields.E002', ) ]) def test_pk(self): class Model(models.Model): pk = models.IntegerField() self.assertEqual(Model.check(), [ Error( "'pk' is a reserved word that cannot be used as a field name.", obj=Model._meta.get_field('pk'), id='fields.E003', ) ]) def test_db_column_clash(self): class Model(models.Model): foo = models.IntegerField() bar = models.IntegerField(db_column='foo') self.assertEqual(Model.check(), [ Error( "Field 'bar' has column name 'foo' that is used by " "another field.", hint="Specify a 'db_column' for the field.", obj=Model, id='models.E007', ) ]) @isolate_apps('invalid_models_tests') class ShadowingFieldsTests(SimpleTestCase): def test_field_name_clash_with_child_accessor(self): class Parent(models.Model): pass class Child(Parent): child = models.CharField(max_length=100) self.assertEqual(Child.check(), [ Error( "The field 'child' clashes with the field " "'child' from model 'invalid_models_tests.parent'.", obj=Child._meta.get_field('child'), id='models.E006', ) ]) def test_multiinheritance_clash(self): class Mother(models.Model): clash = models.IntegerField() class Father(models.Model): clash = models.IntegerField() class Child(Mother, Father): # Here we have two clashed: id (automatic field) and clash, because # both parents define these fields. 
pass self.assertEqual(Child.check(), [ Error( "The field 'id' from parent model " "'invalid_models_tests.mother' clashes with the field 'id' " "from parent model 'invalid_models_tests.father'.", obj=Child, id='models.E005', ), Error( "The field 'clash' from parent model " "'invalid_models_tests.mother' clashes with the field 'clash' " "from parent model 'invalid_models_tests.father'.", obj=Child, id='models.E005', ) ]) def test_inheritance_clash(self): class Parent(models.Model): f_id = models.IntegerField() class Target(models.Model): # This field doesn't result in a clash. f_id = models.IntegerField() class Child(Parent): # This field clashes with parent "f_id" field. f = models.ForeignKey(Target, models.CASCADE) self.assertEqual(Child.check(), [ Error( "The field 'f' clashes with the field 'f_id' " "from model 'invalid_models_tests.parent'.", obj=Child._meta.get_field('f'), id='models.E006', ) ]) def test_multigeneration_inheritance(self): class GrandParent(models.Model): clash = models.IntegerField() class Parent(GrandParent): pass class Child(Parent): pass class GrandChild(Child): clash = models.IntegerField() self.assertEqual(GrandChild.check(), [ Error( "The field 'clash' clashes with the field 'clash' " "from model 'invalid_models_tests.grandparent'.", obj=GrandChild._meta.get_field('clash'), id='models.E006', ) ]) def test_id_clash(self): class Target(models.Model): pass class Model(models.Model): fk = models.ForeignKey(Target, models.CASCADE) fk_id = models.IntegerField() self.assertEqual(Model.check(), [ Error( "The field 'fk_id' clashes with the field 'fk' from model " "'invalid_models_tests.model'.", obj=Model._meta.get_field('fk_id'), id='models.E006', ) ]) @isolate_apps('invalid_models_tests') class OtherModelTests(SimpleTestCase): def test_unique_primary_key(self): invalid_id = models.IntegerField(primary_key=False) class Model(models.Model): id = invalid_id self.assertEqual(Model.check(), [ Error( "'id' can only be used as a field name if the field also sets " "'primary_key=True'.", obj=Model, id='models.E004', ), ]) def test_ordering_non_iterable(self): class Model(models.Model): class Meta: ordering = 'missing_field' self.assertEqual(Model.check(), [ Error( "'ordering' must be a tuple or list " "(even if you want to order by only one field).", obj=Model, id='models.E014', ), ]) def test_just_ordering_no_errors(self): class Model(models.Model): order = models.PositiveIntegerField() class Meta: ordering = ['order'] self.assertEqual(Model.check(), []) def test_just_order_with_respect_to_no_errors(self): class Question(models.Model): pass class Answer(models.Model): question = models.ForeignKey(Question, models.CASCADE) class Meta: order_with_respect_to = 'question' self.assertEqual(Answer.check(), []) def test_ordering_with_order_with_respect_to(self): class Question(models.Model): pass class Answer(models.Model): question = models.ForeignKey(Question, models.CASCADE) order = models.IntegerField() class Meta: order_with_respect_to = 'question' ordering = ['order'] self.assertEqual(Answer.check(), [ Error( "'ordering' and 'order_with_respect_to' cannot be used together.", obj=Answer, id='models.E021', ), ]) def test_non_valid(self): class RelationModel(models.Model): pass class Model(models.Model): relation = models.ManyToManyField(RelationModel) class Meta: ordering = ['relation'] self.assertEqual(Model.check(), [ Error( "'ordering' refers to the nonexistent field, related field, " "or lookup 'relation'.", obj=Model, id='models.E015', ), ]) def 
test_ordering_pointing_to_missing_field(self): class Model(models.Model): class Meta: ordering = ('missing_field',) self.assertEqual(Model.check(), [ Error( "'ordering' refers to the nonexistent field, related field, " "or lookup 'missing_field'.", obj=Model, id='models.E015', ) ]) def test_ordering_pointing_to_missing_foreignkey_field(self): class Model(models.Model): missing_fk_field = models.IntegerField() class Meta: ordering = ('missing_fk_field_id',) self.assertEqual(Model.check(), [ Error( "'ordering' refers to the nonexistent field, related field, " "or lookup 'missing_fk_field_id'.", obj=Model, id='models.E015', ) ]) def test_ordering_pointing_to_missing_related_field(self): class Model(models.Model): test = models.IntegerField() class Meta: ordering = ('missing_related__id',) self.assertEqual(Model.check(), [ Error( "'ordering' refers to the nonexistent field, related field, " "or lookup 'missing_related__id'.", obj=Model, id='models.E015', ) ]) def test_ordering_pointing_to_missing_related_model_field(self): class Parent(models.Model): pass class Child(models.Model): parent = models.ForeignKey(Parent, models.CASCADE) class Meta: ordering = ('parent__missing_field',) self.assertEqual(Child.check(), [ Error( "'ordering' refers to the nonexistent field, related field, " "or lookup 'parent__missing_field'.", obj=Child, id='models.E015', ) ]) def test_ordering_pointing_to_non_related_field(self): class Child(models.Model): parent = models.IntegerField() class Meta: ordering = ('parent__missing_field',) self.assertEqual(Child.check(), [ Error( "'ordering' refers to the nonexistent field, related field, " "or lookup 'parent__missing_field'.", obj=Child, id='models.E015', ) ]) def test_ordering_pointing_to_two_related_model_field(self): class Parent2(models.Model): pass class Parent1(models.Model): parent2 = models.ForeignKey(Parent2, models.CASCADE) class Child(models.Model): parent1 = models.ForeignKey(Parent1, models.CASCADE) class Meta: ordering = ('parent1__parent2__missing_field',) self.assertEqual(Child.check(), [ Error( "'ordering' refers to the nonexistent field, related field, " "or lookup 'parent1__parent2__missing_field'.", obj=Child, id='models.E015', ) ]) def test_ordering_allows_registered_lookups(self): class Model(models.Model): test = models.CharField(max_length=100) class Meta: ordering = ('test__lower',) with register_lookup(models.CharField, Lower): self.assertEqual(Model.check(), []) def test_ordering_pointing_to_foreignkey_field(self): class Parent(models.Model): pass class Child(models.Model): parent = models.ForeignKey(Parent, models.CASCADE) class Meta: ordering = ('parent_id',) self.assertFalse(Child.check()) def test_name_beginning_with_underscore(self): class _Model(models.Model): pass self.assertEqual(_Model.check(), [ Error( "The model name '_Model' cannot start or end with an underscore " "as it collides with the query lookup syntax.", obj=_Model, id='models.E023', ) ]) def test_name_ending_with_underscore(self): class Model_(models.Model): pass self.assertEqual(Model_.check(), [ Error( "The model name 'Model_' cannot start or end with an underscore " "as it collides with the query lookup syntax.", obj=Model_, id='models.E023', ) ]) def test_name_contains_double_underscores(self): class Test__Model(models.Model): pass self.assertEqual(Test__Model.check(), [ Error( "The model name 'Test__Model' cannot contain double underscores " "as it collides with the query lookup syntax.", obj=Test__Model, id='models.E024', ) ]) def 
test_property_and_related_field_accessor_clash(self): class Model(models.Model): fk = models.ForeignKey('self', models.CASCADE) @property def fk_id(self): pass self.assertEqual(Model.check(), [ Error( "The property 'fk_id' clashes with a related field accessor.", obj=Model, id='models.E025', ) ]) def test_single_primary_key(self): class Model(models.Model): foo = models.IntegerField(primary_key=True) bar = models.IntegerField(primary_key=True) self.assertEqual(Model.check(), [ Error( "The model cannot have more than one field with 'primary_key=True'.", obj=Model, id='models.E026', ) ]) @override_settings(TEST_SWAPPED_MODEL_BAD_VALUE='not-a-model') def test_swappable_missing_app_name(self): class Model(models.Model): class Meta: swappable = 'TEST_SWAPPED_MODEL_BAD_VALUE' self.assertEqual(Model.check(), [ Error( "'TEST_SWAPPED_MODEL_BAD_VALUE' is not of the form 'app_label.app_name'.", id='models.E001', ), ]) @override_settings(TEST_SWAPPED_MODEL_BAD_MODEL='not_an_app.Target') def test_swappable_missing_app(self): class Model(models.Model): class Meta: swappable = 'TEST_SWAPPED_MODEL_BAD_MODEL' self.assertEqual(Model.check(), [ Error( "'TEST_SWAPPED_MODEL_BAD_MODEL' references 'not_an_app.Target', " 'which has not been installed, or is abstract.', id='models.E002', ), ]) def test_two_m2m_through_same_relationship(self): class Person(models.Model): pass class Group(models.Model): primary = models.ManyToManyField(Person, through='Membership', related_name='primary') secondary = models.ManyToManyField(Person, through='Membership', related_name='secondary') class Membership(models.Model): person = models.ForeignKey(Person, models.CASCADE) group = models.ForeignKey(Group, models.CASCADE) self.assertEqual(Group.check(), [ Error( "The model has two identical many-to-many relations through " "the intermediate model 'invalid_models_tests.Membership'.", obj=Group, id='models.E003', ) ]) def test_two_m2m_through_same_model_with_different_through_fields(self): class Country(models.Model): pass class ShippingMethod(models.Model): to_countries = models.ManyToManyField( Country, through='ShippingMethodPrice', through_fields=('method', 'to_country'), ) from_countries = models.ManyToManyField( Country, through='ShippingMethodPrice', through_fields=('method', 'from_country'), related_name='+', ) class ShippingMethodPrice(models.Model): method = models.ForeignKey(ShippingMethod, models.CASCADE) to_country = models.ForeignKey(Country, models.CASCADE) from_country = models.ForeignKey(Country, models.CASCADE) self.assertEqual(ShippingMethod.check(), []) def test_missing_parent_link(self): msg = 'Add parent_link=True to invalid_models_tests.ParkingLot.parent.' 
with self.assertRaisesMessage(ImproperlyConfigured, msg): class Place(models.Model): pass class ParkingLot(Place): parent = models.OneToOneField(Place, models.CASCADE) def test_m2m_table_name_clash(self): class Foo(models.Model): bar = models.ManyToManyField('Bar', db_table='myapp_bar') class Meta: db_table = 'myapp_foo' class Bar(models.Model): class Meta: db_table = 'myapp_bar' self.assertEqual(Foo.check(), [ Error( "The field's intermediary table 'myapp_bar' clashes with the " "table name of 'invalid_models_tests.Bar'.", obj=Foo._meta.get_field('bar'), id='fields.E340', ) ]) def test_m2m_field_table_name_clash(self): class Foo(models.Model): pass class Bar(models.Model): foos = models.ManyToManyField(Foo, db_table='clash') class Baz(models.Model): foos = models.ManyToManyField(Foo, db_table='clash') self.assertEqual(Bar.check() + Baz.check(), [ Error( "The field's intermediary table 'clash' clashes with the " "table name of 'invalid_models_tests.Baz.foos'.", obj=Bar._meta.get_field('foos'), id='fields.E340', ), Error( "The field's intermediary table 'clash' clashes with the " "table name of 'invalid_models_tests.Bar.foos'.", obj=Baz._meta.get_field('foos'), id='fields.E340', ) ]) def test_m2m_autogenerated_table_name_clash(self): class Foo(models.Model): class Meta: db_table = 'bar_foos' class Bar(models.Model): # The autogenerated `db_table` will be bar_foos. foos = models.ManyToManyField(Foo) class Meta: db_table = 'bar' self.assertEqual(Bar.check(), [ Error( "The field's intermediary table 'bar_foos' clashes with the " "table name of 'invalid_models_tests.Foo'.", obj=Bar._meta.get_field('foos'), id='fields.E340', ) ]) def test_m2m_unmanaged_shadow_models_not_checked(self): class A1(models.Model): pass class C1(models.Model): mm_a = models.ManyToManyField(A1, db_table='d1') # Unmanaged models that shadow the above models. Reused table names # shouldn't be flagged by any checks. 
class A2(models.Model): class Meta: managed = False class C2(models.Model): mm_a = models.ManyToManyField(A2, through='Intermediate') class Meta: managed = False class Intermediate(models.Model): a2 = models.ForeignKey(A2, models.CASCADE, db_column='a1_id') c2 = models.ForeignKey(C2, models.CASCADE, db_column='c1_id') class Meta: db_table = 'd1' managed = False self.assertEqual(C1.check(), []) self.assertEqual(C2.check(), []) def test_m2m_to_concrete_and_proxy_allowed(self): class A(models.Model): pass class Through(models.Model): a = models.ForeignKey('A', models.CASCADE) c = models.ForeignKey('C', models.CASCADE) class ThroughProxy(Through): class Meta: proxy = True class C(models.Model): mm_a = models.ManyToManyField(A, through=Through) mm_aproxy = models.ManyToManyField(A, through=ThroughProxy, related_name='proxied_m2m') self.assertEqual(C.check(), []) @isolate_apps('django.contrib.auth', kwarg_name='apps') def test_lazy_reference_checks(self, apps): class DummyModel(models.Model): author = models.ForeignKey('Author', models.CASCADE) class Meta: app_label = 'invalid_models_tests' class DummyClass: def __call__(self, **kwargs): pass def dummy_method(self): pass def dummy_function(*args, **kwargs): pass apps.lazy_model_operation(dummy_function, ('auth', 'imaginarymodel')) apps.lazy_model_operation(dummy_function, ('fanciful_app', 'imaginarymodel')) post_init.connect(dummy_function, sender='missing-app.Model', apps=apps) post_init.connect(DummyClass(), sender='missing-app.Model', apps=apps) post_init.connect(DummyClass().dummy_method, sender='missing-app.Model', apps=apps) self.assertEqual(_check_lazy_references(apps), [ Error( "%r contains a lazy reference to auth.imaginarymodel, " "but app 'auth' doesn't provide model 'imaginarymodel'." % dummy_function, obj=dummy_function, id='models.E022', ), Error( "%r contains a lazy reference to fanciful_app.imaginarymodel, " "but app 'fanciful_app' isn't installed." % dummy_function, obj=dummy_function, id='models.E022', ), Error( "An instance of class 'DummyClass' was connected to " "the 'post_init' signal with a lazy reference to the sender " "'missing-app.model', but app 'missing-app' isn't installed.", hint=None, obj='invalid_models_tests.test_models', id='signals.E001', ), Error( "Bound method 'DummyClass.dummy_method' was connected to the " "'post_init' signal with a lazy reference to the sender " "'missing-app.model', but app 'missing-app' isn't installed.", hint=None, obj='invalid_models_tests.test_models', id='signals.E001', ), Error( "The field invalid_models_tests.DummyModel.author was declared " "with a lazy reference to 'invalid_models_tests.author', but app " "'invalid_models_tests' isn't installed.", hint=None, obj=DummyModel.author.field, id='fields.E307', ), Error( "The function 'dummy_function' was connected to the 'post_init' " "signal with a lazy reference to the sender " "'missing-app.model', but app 'missing-app' isn't installed.", hint=None, obj='invalid_models_tests.test_models', id='signals.E001', ), ]) @isolate_apps('invalid_models_tests') class ConstraintsTests(SimpleTestCase): def test_check_constraints(self): class Model(models.Model): age = models.IntegerField() class Meta: constraints = [models.CheckConstraint(check=models.Q(age__gte=18), name='is_adult')] errors = Model.check() warn = Warning( '%s does not support check constraints.' % connection.display_name, hint=( "A constraint won't be created. Silence this warning if you " "don't care about it." 
), obj=Model, id='models.W027', ) expected = [] if connection.features.supports_table_check_constraints else [warn, warn] self.assertCountEqual(errors, expected)
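# As test_ordering_allows_registered_lookups() above shows, Meta.ordering
# may reference a registered transform. A minimal sketch of the pattern
# (hypothetical model; like the tests, it would need @isolate_apps or an
# installed app in order to be defined at all):
from django.db import models
from django.db.models.functions import Lower
from django.test.utils import register_lookup

class Article(models.Model):
    title = models.CharField(max_length=100)

    class Meta:
        # Resolvable only while Lower is registered as a CharField
        # transform; otherwise the check fails with models.E015.
        ordering = ('title__lower',)

with register_lookup(models.CharField, Lower):
    assert Article.check() == []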
from django.core.exceptions import ValidationError from django.db import IntegrityError, connection, models from django.db.models.constraints import BaseConstraint from django.test import SimpleTestCase, TestCase, skipUnlessDBFeature from .models import ChildModel, Product def get_constraints(table): with connection.cursor() as cursor: return connection.introspection.get_constraints(cursor, table) class BaseConstraintTests(SimpleTestCase): def test_constraint_sql(self): c = BaseConstraint('name') msg = 'This method must be implemented by a subclass.' with self.assertRaisesMessage(NotImplementedError, msg): c.constraint_sql(None, None) def test_create_sql(self): c = BaseConstraint('name') msg = 'This method must be implemented by a subclass.' with self.assertRaisesMessage(NotImplementedError, msg): c.create_sql(None, None) def test_remove_sql(self): c = BaseConstraint('name') msg = 'This method must be implemented by a subclass.' with self.assertRaisesMessage(NotImplementedError, msg): c.remove_sql(None, None) class CheckConstraintTests(TestCase): def test_eq(self): check1 = models.Q(price__gt=models.F('discounted_price')) check2 = models.Q(price__lt=models.F('discounted_price')) self.assertEqual( models.CheckConstraint(check=check1, name='price'), models.CheckConstraint(check=check1, name='price'), ) self.assertNotEqual( models.CheckConstraint(check=check1, name='price'), models.CheckConstraint(check=check1, name='price2'), ) self.assertNotEqual( models.CheckConstraint(check=check1, name='price'), models.CheckConstraint(check=check2, name='price'), ) self.assertNotEqual(models.CheckConstraint(check=check1, name='price'), 1) def test_repr(self): check = models.Q(price__gt=models.F('discounted_price')) name = 'price_gt_discounted_price' constraint = models.CheckConstraint(check=check, name=name) self.assertEqual( repr(constraint), "<CheckConstraint: check='{}' name='{}'>".format(check, name), ) def test_deconstruction(self): check = models.Q(price__gt=models.F('discounted_price')) name = 'price_gt_discounted_price' constraint = models.CheckConstraint(check=check, name=name) path, args, kwargs = constraint.deconstruct() self.assertEqual(path, 'django.db.models.CheckConstraint') self.assertEqual(args, ()) self.assertEqual(kwargs, {'check': check, 'name': name}) @skipUnlessDBFeature('supports_table_check_constraints') def test_database_constraint(self): Product.objects.create(name='Valid', price=10, discounted_price=5) with self.assertRaises(IntegrityError): Product.objects.create(name='Invalid', price=10, discounted_price=20) @skipUnlessDBFeature('supports_table_check_constraints') def test_name(self): constraints = get_constraints(Product._meta.db_table) for expected_name in ( 'price_gt_discounted_price', 'constraints_product_price_gt_0', ): with self.subTest(expected_name): self.assertIn(expected_name, constraints) @skipUnlessDBFeature('supports_table_check_constraints') def test_abstract_name(self): constraints = get_constraints(ChildModel._meta.db_table) self.assertIn('constraints_childmodel_adult', constraints) class UniqueConstraintTests(TestCase): @classmethod def setUpTestData(cls): cls.p1, cls.p2 = Product.objects.bulk_create([ Product(name='p1', color='red'), Product(name='p2'), ]) def test_eq(self): self.assertEqual( models.UniqueConstraint(fields=['foo', 'bar'], name='unique'), models.UniqueConstraint(fields=['foo', 'bar'], name='unique'), ) self.assertNotEqual( models.UniqueConstraint(fields=['foo', 'bar'], name='unique'), models.UniqueConstraint(fields=['foo', 'bar'], 
name='unique2'), ) self.assertNotEqual( models.UniqueConstraint(fields=['foo', 'bar'], name='unique'), models.UniqueConstraint(fields=['foo', 'baz'], name='unique'), ) self.assertNotEqual(models.UniqueConstraint(fields=['foo', 'bar'], name='unique'), 1) def test_eq_with_condition(self): self.assertEqual( models.UniqueConstraint( fields=['foo', 'bar'], name='unique', condition=models.Q(foo=models.F('bar')) ), models.UniqueConstraint( fields=['foo', 'bar'], name='unique', condition=models.Q(foo=models.F('bar'))), ) self.assertNotEqual( models.UniqueConstraint( fields=['foo', 'bar'], name='unique', condition=models.Q(foo=models.F('bar')) ), models.UniqueConstraint( fields=['foo', 'bar'], name='unique', condition=models.Q(foo=models.F('baz')) ), ) def test_repr(self): fields = ['foo', 'bar'] name = 'unique_fields' constraint = models.UniqueConstraint(fields=fields, name=name) self.assertEqual( repr(constraint), "<UniqueConstraint: fields=('foo', 'bar') name='unique_fields'>", ) def test_repr_with_condition(self): constraint = models.UniqueConstraint( fields=['foo', 'bar'], name='unique_fields', condition=models.Q(foo=models.F('bar')), ) self.assertEqual( repr(constraint), "<UniqueConstraint: fields=('foo', 'bar') name='unique_fields' " "condition=(AND: ('foo', F(bar)))>", ) def test_deconstruction(self): fields = ['foo', 'bar'] name = 'unique_fields' constraint = models.UniqueConstraint(fields=fields, name=name) path, args, kwargs = constraint.deconstruct() self.assertEqual(path, 'django.db.models.UniqueConstraint') self.assertEqual(args, ()) self.assertEqual(kwargs, {'fields': tuple(fields), 'name': name}) def test_deconstruction_with_condition(self): fields = ['foo', 'bar'] name = 'unique_fields' condition = models.Q(foo=models.F('bar')) constraint = models.UniqueConstraint(fields=fields, name=name, condition=condition) path, args, kwargs = constraint.deconstruct() self.assertEqual(path, 'django.db.models.UniqueConstraint') self.assertEqual(args, ()) self.assertEqual(kwargs, {'fields': tuple(fields), 'name': name, 'condition': condition}) def test_database_constraint(self): with self.assertRaises(IntegrityError): Product.objects.create(name=self.p1.name, color=self.p1.color) def test_model_validation(self): with self.assertRaisesMessage(ValidationError, 'Product with this Name and Color already exists.'): Product(name=self.p1.name, color=self.p1.color).validate_unique() def test_model_validation_with_condition(self): """Partial unique constraints are ignored by Model.validate_unique().""" Product(name=self.p1.name, color='blue').validate_unique() Product(name=self.p2.name).validate_unique() def test_name(self): constraints = get_constraints(Product._meta.db_table) expected_name = 'name_color_uniq' self.assertIn(expected_name, constraints) def test_condition_must_be_q(self): with self.assertRaisesMessage(ValueError, 'UniqueConstraint.condition must be a Q instance.'): models.UniqueConstraint(name='uniq', fields=['name'], condition='invalid')
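# The conditional-constraint behavior exercised above is easiest to see at
# the database level: 'name_without_color_uniq' (defined in .models) only
# covers rows WHERE color IS NULL. A usage sketch, assuming a backend with
# partial unique index support:
from django.db import IntegrityError, transaction

from .models import Product

Product.objects.create(name='widget', color='red')
Product.objects.create(name='widget', color='blue')  # allowed: color is set

Product.objects.create(name='gadget')  # color=None, covered by the constraint
try:
    with transaction.atomic():
        Product.objects.create(name='gadget')  # second NULL-color duplicate
except IntegrityError:
    pass  # rejected by the partial unique index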
from django.db import models class Product(models.Model): name = models.CharField(max_length=255) color = models.CharField(max_length=32, null=True) price = models.IntegerField(null=True) discounted_price = models.IntegerField(null=True) class Meta: constraints = [ models.CheckConstraint( check=models.Q(price__gt=models.F('discounted_price')), name='price_gt_discounted_price', ), models.CheckConstraint( check=models.Q(price__gt=0), name='%(app_label)s_%(class)s_price_gt_0', ), models.UniqueConstraint(fields=['name', 'color'], name='name_color_uniq'), models.UniqueConstraint( fields=['name'], name='name_without_color_uniq', condition=models.Q(color__isnull=True), ), ] class AbstractModel(models.Model): age = models.IntegerField() class Meta: abstract = True constraints = [ models.CheckConstraint( check=models.Q(age__gte=18), name='%(app_label)s_%(class)s_adult', ), ] class ChildModel(AbstractModel): pass
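# Note how the '%(app_label)s_%(class)s' placeholders in AbstractModel's
# constraint resolve when ChildModel is created; this is what the
# test_abstract_name() case checks against the database. A quick
# illustration (the 'constraints' app label is implied by that test's
# expected name):
from .models import ChildModel

adult_constraint = ChildModel._meta.constraints[0]
assert adult_constraint.name == 'constraints_childmodel_adult'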
from django.core.serializers.json import DjangoJSONEncoder

from django.db import migrations, models

from ..fields import (
    ArrayField, BigIntegerRangeField, CICharField, CIEmailField, CITextField,
    DateRangeField, DateTimeRangeField, DecimalRangeField, EnumField,
    HStoreField, IntegerRangeField, JSONField, SearchVectorField,
)
from ..models import TagField


class Migration(migrations.Migration):

    dependencies = [
        ('postgres_tests', '0001_setup_extensions'),
    ]

    operations = [
        migrations.CreateModel(
            name='CharArrayModel',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('field', ArrayField(models.CharField(max_length=10), size=None)),
            ],
            options={
                'required_db_vendor': 'postgresql',
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='DateTimeArrayModel',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('datetimes', ArrayField(models.DateTimeField(), size=None)),
                ('dates', ArrayField(models.DateField(), size=None)),
                ('times', ArrayField(models.TimeField(), size=None)),
            ],
            options={
                'required_db_vendor': 'postgresql',
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='HStoreModel',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('field', HStoreField(blank=True, null=True)),
                ('array_field', ArrayField(HStoreField(), null=True)),
            ],
            options={
                'required_db_vendor': 'postgresql',
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='OtherTypesArrayModel',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('ips', ArrayField(models.GenericIPAddressField(), size=None, default=list)),
                ('uuids', ArrayField(models.UUIDField(), size=None, default=list)),
                ('decimals', ArrayField(models.DecimalField(max_digits=5, decimal_places=2), size=None, default=list)),
                ('tags', ArrayField(TagField(), blank=True, null=True, size=None)),
                ('json', ArrayField(JSONField(default={}), default=[])),
                ('int_ranges', ArrayField(IntegerRangeField(), null=True, blank=True)),
                ('bigint_ranges', ArrayField(BigIntegerRangeField(), null=True, blank=True)),
            ],
            options={
                'required_db_vendor': 'postgresql',
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='IntegerArrayModel',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('field', ArrayField(models.IntegerField(), size=None)),
            ],
            options={
                'required_db_vendor': 'postgresql',
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='NestedIntegerArrayModel',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('field', ArrayField(ArrayField(models.IntegerField(), size=None), size=None)),
            ],
            options={
                'required_db_vendor': 'postgresql',
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='NullableIntegerArrayModel',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('field', ArrayField(models.IntegerField(), size=None, null=True, blank=True)),
            ],
            options={
                'required_db_vendor': 'postgresql',
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='CharFieldModel',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('field', models.CharField(max_length=16)),
            ],
            options=None,
            bases=None,
        ),
        migrations.CreateModel(
            name='TextFieldModel',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('field', models.TextField()),
            ],
            options=None,
            bases=None,
        ),
        migrations.CreateModel(
            name='Scene',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('scene', models.CharField(max_length=255)),
                ('setting', models.CharField(max_length=255)),
            ],
            options=None,
            bases=None,
        ),
        migrations.CreateModel(
            name='Character',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=255)),
            ],
            options=None,
            bases=None,
        ),
        migrations.CreateModel(
            name='CITestModel',
            fields=[
                ('name', CICharField(primary_key=True, max_length=255)),
                ('email', CIEmailField()),
                ('description', CITextField()),
                ('array_field', ArrayField(CITextField(), null=True)),
            ],
            options={
                'required_db_vendor': 'postgresql',
            },
            bases=None,
        ),
        migrations.CreateModel(
            name='Line',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('scene', models.ForeignKey('postgres_tests.Scene', on_delete=models.SET_NULL)),
                ('character', models.ForeignKey('postgres_tests.Character', on_delete=models.SET_NULL)),
                ('dialogue', models.TextField(blank=True, null=True)),
                ('dialogue_search_vector', SearchVectorField(blank=True, null=True)),
                ('dialogue_config', models.CharField(max_length=100, blank=True, null=True)),
            ],
            options={
                'required_db_vendor': 'postgresql',
            },
            bases=None,
        ),
        migrations.CreateModel(
            name='AggregateTestModel',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('boolean_field', models.BooleanField(null=True)),
                ('char_field', models.CharField(max_length=30, blank=True)),
                ('integer_field', models.IntegerField(null=True)),
            ]
        ),
        migrations.CreateModel(
            name='StatTestModel',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('int1', models.IntegerField()),
                ('int2', models.IntegerField()),
                ('related_field', models.ForeignKey(
                    'postgres_tests.AggregateTestModel',
                    models.SET_NULL,
                    null=True,
                )),
            ]
        ),
        migrations.CreateModel(
            name='NowTestModel',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('when', models.DateTimeField(null=True, default=None)),
            ]
        ),
        migrations.CreateModel(
            name='UUIDTestModel',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('uuid', models.UUIDField(default=None, null=True)),
            ]
        ),
        migrations.CreateModel(
            name='RangesModel',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('ints', IntegerRangeField(null=True, blank=True)),
                ('bigints', BigIntegerRangeField(null=True, blank=True)),
                ('decimals', DecimalRangeField(null=True, blank=True)),
                ('timestamps', DateTimeRangeField(null=True, blank=True)),
                ('timestamps_inner', DateTimeRangeField(null=True, blank=True)),
                ('dates', DateRangeField(null=True, blank=True)),
                ('dates_inner', DateRangeField(null=True, blank=True)),
            ],
            options={
                'required_db_vendor': 'postgresql'
            },
            bases=(models.Model,)
        ),
        migrations.CreateModel(
            name='RangeLookupsModel',
            fields=[
                ('parent', models.ForeignKey(
                    'postgres_tests.RangesModel',
                    models.SET_NULL,
                    blank=True, null=True,
                )),
                ('integer', models.IntegerField(blank=True, null=True)),
                ('big_integer', models.BigIntegerField(blank=True, null=True)),
                ('float', models.FloatField(blank=True, null=True)),
                ('timestamp', models.DateTimeField(blank=True, null=True)),
                ('date', models.DateField(blank=True, null=True)),
            ],
            options={
                'required_db_vendor': 'postgresql',
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='JSONModel',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('field', JSONField(null=True, blank=True)),
                ('field_custom', JSONField(null=True, blank=True, encoder=DjangoJSONEncoder)),
            ],
            options={
                'required_db_vendor': 'postgresql',
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='ArrayEnumModel',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('array_of_enums', ArrayField(EnumField(max_length=20), null=True, blank=True)),
            ],
            options={
                'required_db_vendor': 'postgresql',
            },
            bases=(models.Model,),
        ),
    ]
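A side note on the defaults above: `ArrayField(JSONField(default={}), default=[])` mirrors the historical test models, but a mutable instance used as a field default is shared between every model instance. In ordinary application code the convention is to pass the callable itself. A minimal sketch, assuming a hypothetical `Sketch` model that is not part of the migration above:

```python
# Hypothetical example model -- not part of the migration above.
from django.contrib.postgres.fields import ArrayField, JSONField
from django.db import models


class Sketch(models.Model):
    # Passing the callable means each new row gets its own fresh list/dict.
    tags = ArrayField(models.CharField(max_length=10), default=list)  # not default=[]
    meta = JSONField(default=dict)  # not default={}
```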
import copy import datetime import inspect from decimal import Decimal from django.core.exceptions import EmptyResultSet, FieldError from django.db import connection from django.db.models import fields from django.db.models.query_utils import Q from django.db.utils import NotSupportedError from django.utils.deconstruct import deconstructible from django.utils.functional import cached_property from django.utils.hashable import make_hashable class SQLiteNumericMixin: """ Some expressions with output_field=DecimalField() must be cast to numeric to be properly filtered. """ def as_sqlite(self, compiler, connection, **extra_context): sql, params = self.as_sql(compiler, connection, **extra_context) try: if self.output_field.get_internal_type() == 'DecimalField': sql = 'CAST(%s AS NUMERIC)' % sql except FieldError: pass return sql, params class Combinable: """ Provide the ability to combine one or two objects with some connector. For example F('foo') + F('bar'). """ # Arithmetic connectors ADD = '+' SUB = '-' MUL = '*' DIV = '/' POW = '^' # The following is a quoted % operator - it is quoted because it can be # used in strings that also have parameter substitution. MOD = '%%' # Bitwise operators - note that these are generated by .bitand() # and .bitor(), the '&' and '|' are reserved for boolean operator # usage. BITAND = '&' BITOR = '|' BITLEFTSHIFT = '<<' BITRIGHTSHIFT = '>>' def _combine(self, other, connector, reversed): if not hasattr(other, 'resolve_expression'): # everything must be resolvable to an expression if isinstance(other, datetime.timedelta): other = DurationValue(other, output_field=fields.DurationField()) else: other = Value(other) if reversed: return CombinedExpression(other, connector, self) return CombinedExpression(self, connector, other) ############# # OPERATORS # ############# def __neg__(self): return self._combine(-1, self.MUL, False) def __add__(self, other): return self._combine(other, self.ADD, False) def __sub__(self, other): return self._combine(other, self.SUB, False) def __mul__(self, other): return self._combine(other, self.MUL, False) def __truediv__(self, other): return self._combine(other, self.DIV, False) def __mod__(self, other): return self._combine(other, self.MOD, False) def __pow__(self, other): return self._combine(other, self.POW, False) def __and__(self, other): raise NotImplementedError( "Use .bitand() and .bitor() for bitwise logical operations." ) def bitand(self, other): return self._combine(other, self.BITAND, False) def bitleftshift(self, other): return self._combine(other, self.BITLEFTSHIFT, False) def bitrightshift(self, other): return self._combine(other, self.BITRIGHTSHIFT, False) def __or__(self, other): raise NotImplementedError( "Use .bitand() and .bitor() for bitwise logical operations." ) def bitor(self, other): return self._combine(other, self.BITOR, False) def __radd__(self, other): return self._combine(other, self.ADD, True) def __rsub__(self, other): return self._combine(other, self.SUB, True) def __rmul__(self, other): return self._combine(other, self.MUL, True) def __rtruediv__(self, other): return self._combine(other, self.DIV, True) def __rmod__(self, other): return self._combine(other, self.MOD, True) def __rpow__(self, other): return self._combine(other, self.POW, True) def __rand__(self, other): raise NotImplementedError( "Use .bitand() and .bitor() for bitwise logical operations." ) def __ror__(self, other): raise NotImplementedError( "Use .bitand() and .bitor() for bitwise logical operations." 
) @deconstructible class BaseExpression: """Base class for all query expressions.""" # aggregate specific fields is_summary = False _output_field_resolved_to_none = False # Can the expression be used in a WHERE clause? filterable = True # Can the expression be used as a source expression in Window? window_compatible = False def __init__(self, output_field=None): if output_field is not None: self.output_field = output_field def __getstate__(self): state = self.__dict__.copy() state.pop('convert_value', None) return state def get_db_converters(self, connection): return ( [] if self.convert_value is self._convert_value_noop else [self.convert_value] ) + self.output_field.get_db_converters(connection) def get_source_expressions(self): return [] def set_source_expressions(self, exprs): assert not exprs def _parse_expressions(self, *expressions): return [ arg if hasattr(arg, 'resolve_expression') else ( F(arg) if isinstance(arg, str) else Value(arg) ) for arg in expressions ] def as_sql(self, compiler, connection): """ Responsible for returning a (sql, [params]) tuple to be included in the current query. Different backends can provide their own implementation, by providing an `as_{vendor}` method and patching the Expression: ``` def override_as_sql(self, compiler, connection): # custom logic return super().as_sql(compiler, connection) setattr(Expression, 'as_' + connection.vendor, override_as_sql) ``` Arguments: * compiler: the query compiler responsible for generating the query. Must have a compile method, returning a (sql, [params]) tuple. Calling compiler(value) will return a quoted `value`. * connection: the database connection used for the current query. Return: (sql, params) Where `sql` is a string containing ordered sql parameters to be replaced with the elements of the list `params`. """ raise NotImplementedError("Subclasses must implement as_sql()") @cached_property def contains_aggregate(self): return any(expr and expr.contains_aggregate for expr in self.get_source_expressions()) @cached_property def contains_over_clause(self): return any(expr and expr.contains_over_clause for expr in self.get_source_expressions()) @cached_property def contains_column_references(self): return any(expr and expr.contains_column_references for expr in self.get_source_expressions()) def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False): """ Provide the chance to do any preprocessing or validation before being added to the query. Arguments: * query: the backend query implementation * allow_joins: boolean allowing or denying use of joins in this query * reuse: a set of reusable joins for multijoins * summarize: a terminal aggregate clause * for_save: whether this expression is about to be used in a save or update Return: an Expression to be added to the query.
""" c = self.copy() c.is_summary = summarize c.set_source_expressions([ expr.resolve_expression(query, allow_joins, reuse, summarize) if expr else None for expr in c.get_source_expressions() ]) return c @property def field(self): return self.output_field @cached_property def output_field(self): """Return the output type of this expressions.""" output_field = self._resolve_output_field() if output_field is None: self._output_field_resolved_to_none = True raise FieldError('Cannot resolve expression type, unknown output_field') return output_field @cached_property def _output_field_or_none(self): """ Return the output field of this expression, or None if _resolve_output_field() didn't return an output type. """ try: return self.output_field except FieldError: if not self._output_field_resolved_to_none: raise def _resolve_output_field(self): """ Attempt to infer the output type of the expression. If the output fields of all source fields match then, simply infer the same type here. This isn't always correct, but it makes sense most of the time. Consider the difference between `2 + 2` and `2 / 3`. Inferring the type here is a convenience for the common case. The user should supply their own output_field with more complex computations. If a source's output field resolves to None, exclude it from this check. If all sources are None, then an error is raised higher up the stack in the output_field property. """ sources_iter = (source for source in self.get_source_fields() if source is not None) for output_field in sources_iter: for source in sources_iter: if not isinstance(output_field, source.__class__): raise FieldError( 'Expression contains mixed types: %s, %s. You must ' 'set output_field.' % ( output_field.__class__.__name__, source.__class__.__name__, ) ) return output_field @staticmethod def _convert_value_noop(value, expression, connection): return value @cached_property def convert_value(self): """ Expressions provide their own converters because users have the option of manually specifying the output_field which may be a different type from the one the database returns. 
""" field = self.output_field internal_type = field.get_internal_type() if internal_type == 'FloatField': return lambda value, expression, connection: None if value is None else float(value) elif internal_type.endswith('IntegerField'): return lambda value, expression, connection: None if value is None else int(value) elif internal_type == 'DecimalField': return lambda value, expression, connection: None if value is None else Decimal(value) return self._convert_value_noop def get_lookup(self, lookup): return self.output_field.get_lookup(lookup) def get_transform(self, name): return self.output_field.get_transform(name) def relabeled_clone(self, change_map): clone = self.copy() clone.set_source_expressions([ e.relabeled_clone(change_map) if e is not None else None for e in self.get_source_expressions() ]) return clone def copy(self): return copy.copy(self) def get_group_by_cols(self, alias=None): if not self.contains_aggregate: return [self] cols = [] for source in self.get_source_expressions(): cols.extend(source.get_group_by_cols()) return cols def get_source_fields(self): """Return the underlying field types used by this aggregate.""" return [e._output_field_or_none for e in self.get_source_expressions()] def asc(self, **kwargs): return OrderBy(self, **kwargs) def desc(self, **kwargs): return OrderBy(self, descending=True, **kwargs) def reverse_ordering(self): return self def flatten(self): """ Recursively yield this expression and all subexpressions, in depth-first order. """ yield self for expr in self.get_source_expressions(): if expr: yield from expr.flatten() @cached_property def identity(self): constructor_signature = inspect.signature(self.__init__) args, kwargs = self._constructor_args signature = constructor_signature.bind_partial(*args, **kwargs) signature.apply_defaults() arguments = signature.arguments.items() identity = [self.__class__] for arg, value in arguments: if isinstance(value, fields.Field): if value.name and value.model: value = (value.model._meta.label, value.name) else: value = type(value) else: value = make_hashable(value) identity.append((arg, value)) return tuple(identity) def __eq__(self, other): return isinstance(other, BaseExpression) and other.identity == self.identity def __hash__(self): return hash(self.identity) class Expression(BaseExpression, Combinable): """An expression that can be combined with other expressions.""" pass class CombinedExpression(SQLiteNumericMixin, Expression): def __init__(self, lhs, connector, rhs, output_field=None): super().__init__(output_field=output_field) self.connector = connector self.lhs = lhs self.rhs = rhs def __repr__(self): return "<{}: {}>".format(self.__class__.__name__, self) def __str__(self): return "{} {} {}".format(self.lhs, self.connector, self.rhs) def get_source_expressions(self): return [self.lhs, self.rhs] def set_source_expressions(self, exprs): self.lhs, self.rhs = exprs def as_sql(self, compiler, connection): try: lhs_output = self.lhs.output_field except FieldError: lhs_output = None try: rhs_output = self.rhs.output_field except FieldError: rhs_output = None if (not connection.features.has_native_duration_field and ((lhs_output and lhs_output.get_internal_type() == 'DurationField') or (rhs_output and rhs_output.get_internal_type() == 'DurationField'))): return DurationExpression(self.lhs, self.connector, self.rhs).as_sql(compiler, connection) if (lhs_output and rhs_output and self.connector == self.SUB and lhs_output.get_internal_type() in {'DateField', 'DateTimeField', 'TimeField'} and 
lhs_output.get_internal_type() == rhs_output.get_internal_type()): return TemporalSubtraction(self.lhs, self.rhs).as_sql(compiler, connection) expressions = [] expression_params = [] sql, params = compiler.compile(self.lhs) expressions.append(sql) expression_params.extend(params) sql, params = compiler.compile(self.rhs) expressions.append(sql) expression_params.extend(params) # order of precedence expression_wrapper = '(%s)' sql = connection.ops.combine_expression(self.connector, expressions) return expression_wrapper % sql, expression_params def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False): c = self.copy() c.is_summary = summarize c.lhs = c.lhs.resolve_expression(query, allow_joins, reuse, summarize, for_save) c.rhs = c.rhs.resolve_expression(query, allow_joins, reuse, summarize, for_save) return c class DurationExpression(CombinedExpression): def compile(self, side, compiler, connection): if not isinstance(side, DurationValue): try: output = side.output_field except FieldError: pass else: if output.get_internal_type() == 'DurationField': sql, params = compiler.compile(side) return connection.ops.format_for_duration_arithmetic(sql), params return compiler.compile(side) def as_sql(self, compiler, connection): connection.ops.check_expression_support(self) expressions = [] expression_params = [] sql, params = self.compile(self.lhs, compiler, connection) expressions.append(sql) expression_params.extend(params) sql, params = self.compile(self.rhs, compiler, connection) expressions.append(sql) expression_params.extend(params) # order of precedence expression_wrapper = '(%s)' sql = connection.ops.combine_duration_expression(self.connector, expressions) return expression_wrapper % sql, expression_params class TemporalSubtraction(CombinedExpression): output_field = fields.DurationField() def __init__(self, lhs, rhs): super().__init__(lhs, self.SUB, rhs) def as_sql(self, compiler, connection): connection.ops.check_expression_support(self) lhs = compiler.compile(self.lhs, connection) rhs = compiler.compile(self.rhs, connection) return connection.ops.subtract_temporals(self.lhs.output_field.get_internal_type(), lhs, rhs) @deconstructible class F(Combinable): """An object capable of resolving references to existing query objects.""" # Can the expression be used in a WHERE clause? filterable = True def __init__(self, name): """ Arguments: * name: the name of the field this expression references """ self.name = name def __repr__(self): return "{}({})".format(self.__class__.__name__, self.name) def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False, simple_col=False): return query.resolve_ref(self.name, allow_joins, reuse, summarize, simple_col) def asc(self, **kwargs): return OrderBy(self, **kwargs) def desc(self, **kwargs): return OrderBy(self, descending=True, **kwargs) def __eq__(self, other): return self.__class__ == other.__class__ and self.name == other.name def __hash__(self): return hash(self.name) class ResolvedOuterRef(F): """ An object that contains a reference to an outer query. In this case, the reference to the outer query has been resolved because the inner query has been used as a subquery. """ contains_aggregate = False def as_sql(self, *args, **kwargs): raise ValueError( 'This queryset contains a reference to an outer query and may ' 'only be used in a subquery.' 
) def relabeled_clone(self, relabels): return self class OuterRef(F): def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False, simple_col=False): if isinstance(self.name, self.__class__): return self.name return ResolvedOuterRef(self.name) class Func(SQLiteNumericMixin, Expression): """An SQL function call.""" function = None template = '%(function)s(%(expressions)s)' arg_joiner = ', ' arity = None # The number of arguments the function accepts. def __init__(self, *expressions, output_field=None, **extra): if self.arity is not None and len(expressions) != self.arity: raise TypeError( "'%s' takes exactly %s %s (%s given)" % ( self.__class__.__name__, self.arity, "argument" if self.arity == 1 else "arguments", len(expressions), ) ) super().__init__(output_field=output_field) self.source_expressions = self._parse_expressions(*expressions) self.extra = extra def __repr__(self): args = self.arg_joiner.join(str(arg) for arg in self.source_expressions) extra = {**self.extra, **self._get_repr_options()} if extra: extra = ', '.join(str(key) + '=' + str(val) for key, val in sorted(extra.items())) return "{}({}, {})".format(self.__class__.__name__, args, extra) return "{}({})".format(self.__class__.__name__, args) def _get_repr_options(self): """Return a dict of extra __init__() options to include in the repr.""" return {} def get_source_expressions(self): return self.source_expressions def set_source_expressions(self, exprs): self.source_expressions = exprs def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False): c = self.copy() c.is_summary = summarize for pos, arg in enumerate(c.source_expressions): c.source_expressions[pos] = arg.resolve_expression(query, allow_joins, reuse, summarize, for_save) return c def as_sql(self, compiler, connection, function=None, template=None, arg_joiner=None, **extra_context): connection.ops.check_expression_support(self) sql_parts = [] params = [] for arg in self.source_expressions: arg_sql, arg_params = compiler.compile(arg) sql_parts.append(arg_sql) params.extend(arg_params) data = {**self.extra, **extra_context} # Use the first supplied value in this order: the parameter to this # method, a value supplied in __init__()'s **extra (the value in # `data`), or the value defined on the class. if function is not None: data['function'] = function else: data.setdefault('function', self.function) template = template or data.get('template', self.template) arg_joiner = arg_joiner or data.get('arg_joiner', self.arg_joiner) data['expressions'] = data['field'] = arg_joiner.join(sql_parts) return template % data, params def copy(self): copy = super().copy() copy.source_expressions = self.source_expressions[:] copy.extra = self.extra.copy() return copy class Value(Expression): """Represent a wrapped value as a node within an expression.""" def __init__(self, value, output_field=None): """ Arguments: * value: the value this expression represents. The value will be added into the sql parameter list and properly quoted. * output_field: an instance of the model field type that this expression will return, such as IntegerField() or CharField(). 
""" super().__init__(output_field=output_field) self.value = value def __repr__(self): return "{}({})".format(self.__class__.__name__, self.value) def as_sql(self, compiler, connection): connection.ops.check_expression_support(self) val = self.value output_field = self._output_field_or_none if output_field is not None: if self.for_save: val = output_field.get_db_prep_save(val, connection=connection) else: val = output_field.get_db_prep_value(val, connection=connection) if hasattr(output_field, 'get_placeholder'): return output_field.get_placeholder(val, compiler, connection), [val] if val is None: # cx_Oracle does not always convert None to the appropriate # NULL type (like in case expressions using numbers), so we # use a literal SQL NULL return 'NULL', [] return '%s', [val] def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False): c = super().resolve_expression(query, allow_joins, reuse, summarize, for_save) c.for_save = for_save return c def get_group_by_cols(self, alias=None): return [] class DurationValue(Value): def as_sql(self, compiler, connection): connection.ops.check_expression_support(self) if connection.features.has_native_duration_field: return super().as_sql(compiler, connection) return connection.ops.date_interval_sql(self.value), [] class RawSQL(Expression): def __init__(self, sql, params, output_field=None): if output_field is None: output_field = fields.Field() self.sql, self.params = sql, params super().__init__(output_field=output_field) def __repr__(self): return "{}({}, {})".format(self.__class__.__name__, self.sql, self.params) def as_sql(self, compiler, connection): return '(%s)' % self.sql, self.params def get_group_by_cols(self, alias=None): return [self] def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False): # Resolve parents fields used in raw SQL. for parent in query.model._meta.get_parent_list(): for parent_field in parent._meta.local_fields: _, column_name = parent_field.get_attname_column() if column_name.lower() in self.sql.lower(): query.resolve_ref(parent_field.name, allow_joins, reuse, summarize) break return super().resolve_expression(query, allow_joins, reuse, summarize, for_save) class Star(Expression): def __repr__(self): return "'*'" def as_sql(self, compiler, connection): return '*', [] class Random(Expression): output_field = fields.FloatField() def __repr__(self): return "Random()" def as_sql(self, compiler, connection): return connection.ops.random_function_sql(), [] class Col(Expression): contains_column_references = True def __init__(self, alias, target, output_field=None): if output_field is None: output_field = target super().__init__(output_field=output_field) self.alias, self.target = alias, target def __repr__(self): return "{}({}, {})".format( self.__class__.__name__, self.alias, self.target) def as_sql(self, compiler, connection): qn = compiler.quote_name_unless_alias return "%s.%s" % (qn(self.alias), qn(self.target.column)), [] def relabeled_clone(self, relabels): return self.__class__(relabels.get(self.alias, self.alias), self.target, self.output_field) def get_group_by_cols(self, alias=None): return [self] def get_db_converters(self, connection): if self.target == self.output_field: return self.output_field.get_db_converters(connection) return (self.output_field.get_db_converters(connection) + self.target.get_db_converters(connection)) class SimpleCol(Expression): """ Represents the SQL of a column name without the table name. 
This variant of Col doesn't include the table name (or an alias) to avoid a syntax error in check constraints. """ contains_column_references = True def __init__(self, target, output_field=None): if output_field is None: output_field = target super().__init__(output_field=output_field) self.target = target def __repr__(self): return '{}({})'.format(self.__class__.__name__, self.target) def as_sql(self, compiler, connection): qn = compiler.quote_name_unless_alias return qn(self.target.column), [] def get_group_by_cols(self, alias=None): return [self] def get_db_converters(self, connection): if self.target == self.output_field: return self.output_field.get_db_converters(connection) return ( self.output_field.get_db_converters(connection) + self.target.get_db_converters(connection) ) class Ref(Expression): """ Reference to column alias of the query. For example, Ref('sum_cost') in qs.annotate(sum_cost=Sum('cost')) query. """ def __init__(self, refs, source): super().__init__() self.refs, self.source = refs, source def __repr__(self): return "{}({}, {})".format(self.__class__.__name__, self.refs, self.source) def get_source_expressions(self): return [self.source] def set_source_expressions(self, exprs): self.source, = exprs def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False): # The sub-expression `source` has already been resolved, as this is # just a reference to the name of `source`. return self def relabeled_clone(self, relabels): return self def as_sql(self, compiler, connection): return connection.ops.quote_name(self.refs), [] def get_group_by_cols(self, alias=None): return [self] class ExpressionList(Func): """ An expression containing multiple expressions. Can be used to provide a list of expressions as an argument to another expression, like an ordering clause. """ template = '%(expressions)s' def __init__(self, *expressions, **extra): if not expressions: raise ValueError('%s requires at least one expression.' % self.__class__.__name__) super().__init__(*expressions, **extra) def __str__(self): return self.arg_joiner.join(str(arg) for arg in self.source_expressions) class ExpressionWrapper(Expression): """ An expression that can wrap another expression so that it can provide extra context to the inner expression, such as the output_field. 
""" def __init__(self, expression, output_field): super().__init__(output_field=output_field) self.expression = expression def set_source_expressions(self, exprs): self.expression = exprs[0] def get_source_expressions(self): return [self.expression] def as_sql(self, compiler, connection): return self.expression.as_sql(compiler, connection) def __repr__(self): return "{}({})".format(self.__class__.__name__, self.expression) class When(Expression): template = 'WHEN %(condition)s THEN %(result)s' def __init__(self, condition=None, then=None, **lookups): if lookups and condition is None: condition, lookups = Q(**lookups), None if condition is None or not getattr(condition, 'conditional', False) or lookups: raise TypeError("__init__() takes either a Q object or lookups as keyword arguments") if isinstance(condition, Q) and not condition: raise ValueError("An empty Q() can't be used as a When() condition.") super().__init__(output_field=None) self.condition = condition self.result = self._parse_expressions(then)[0] def __str__(self): return "WHEN %r THEN %r" % (self.condition, self.result) def __repr__(self): return "<%s: %s>" % (self.__class__.__name__, self) def get_source_expressions(self): return [self.condition, self.result] def set_source_expressions(self, exprs): self.condition, self.result = exprs def get_source_fields(self): # We're only interested in the fields of the result expressions. return [self.result._output_field_or_none] def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False): c = self.copy() c.is_summary = summarize if hasattr(c.condition, 'resolve_expression'): c.condition = c.condition.resolve_expression(query, allow_joins, reuse, summarize, False) c.result = c.result.resolve_expression(query, allow_joins, reuse, summarize, for_save) return c def as_sql(self, compiler, connection, template=None, **extra_context): connection.ops.check_expression_support(self) template_params = extra_context sql_params = [] condition_sql, condition_params = compiler.compile(self.condition) template_params['condition'] = condition_sql sql_params.extend(condition_params) result_sql, result_params = compiler.compile(self.result) template_params['result'] = result_sql sql_params.extend(result_params) template = template or self.template return template % template_params, sql_params def get_group_by_cols(self, alias=None): # This is not a complete expression and cannot be used in GROUP BY. 
cols = [] for source in self.get_source_expressions(): cols.extend(source.get_group_by_cols()) return cols class Case(Expression): """ An SQL searched CASE expression: CASE WHEN n > 0 THEN 'positive' WHEN n < 0 THEN 'negative' ELSE 'zero' END """ template = 'CASE %(cases)s ELSE %(default)s END' case_joiner = ' ' def __init__(self, *cases, default=None, output_field=None, **extra): if not all(isinstance(case, When) for case in cases): raise TypeError("Positional arguments must all be When objects.") super().__init__(output_field) self.cases = list(cases) self.default = self._parse_expressions(default)[0] self.extra = extra def __str__(self): return "CASE %s, ELSE %r" % (', '.join(str(c) for c in self.cases), self.default) def __repr__(self): return "<%s: %s>" % (self.__class__.__name__, self) def get_source_expressions(self): return self.cases + [self.default] def set_source_expressions(self, exprs): *self.cases, self.default = exprs def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False): c = self.copy() c.is_summary = summarize for pos, case in enumerate(c.cases): c.cases[pos] = case.resolve_expression(query, allow_joins, reuse, summarize, for_save) c.default = c.default.resolve_expression(query, allow_joins, reuse, summarize, for_save) return c def copy(self): c = super().copy() c.cases = c.cases[:] return c def as_sql(self, compiler, connection, template=None, case_joiner=None, **extra_context): connection.ops.check_expression_support(self) if not self.cases: return compiler.compile(self.default) template_params = {**self.extra, **extra_context} case_parts = [] sql_params = [] for case in self.cases: try: case_sql, case_params = compiler.compile(case) except EmptyResultSet: continue case_parts.append(case_sql) sql_params.extend(case_params) default_sql, default_params = compiler.compile(self.default) if not case_parts: return default_sql, default_params case_joiner = case_joiner or self.case_joiner template_params['cases'] = case_joiner.join(case_parts) template_params['default'] = default_sql sql_params.extend(default_params) template = template or template_params.get('template', self.template) sql = template % template_params if self._output_field_or_none is not None: sql = connection.ops.unification_cast_sql(self.output_field) % sql return sql, sql_params class Subquery(Expression): """ An explicit subquery. It may contain OuterRef() references to the outer query which will be resolved when it is applied to that query. 
""" template = '(%(subquery)s)' contains_aggregate = False def __init__(self, queryset, output_field=None, **extra): self.query = queryset.query self.extra = extra super().__init__(output_field) def get_source_expressions(self): return [self.query] def set_source_expressions(self, exprs): self.query = exprs[0] def _resolve_output_field(self): return self.query.output_field def copy(self): clone = super().copy() clone.query = clone.query.clone() return clone @property def external_aliases(self): return self.query.external_aliases def as_sql(self, compiler, connection, template=None, **extra_context): connection.ops.check_expression_support(self) template_params = {**self.extra, **extra_context} subquery_sql, sql_params = self.query.as_sql(compiler, connection) template_params['subquery'] = subquery_sql[1:-1] template = template or template_params.get('template', self.template) sql = template % template_params return sql, sql_params def get_group_by_cols(self, alias=None): if alias: return [Ref(alias, self)] return [] class Exists(Subquery): template = 'EXISTS(%(subquery)s)' output_field = fields.BooleanField() def __init__(self, queryset, negated=False, **kwargs): # As a performance optimization, remove ordering since EXISTS doesn't # care about it, just whether or not a row matches. queryset = queryset.order_by() self.negated = negated super().__init__(queryset, **kwargs) def __invert__(self): clone = self.copy() clone.negated = not self.negated return clone def as_sql(self, compiler, connection, template=None, **extra_context): sql, params = super().as_sql(compiler, connection, template, **extra_context) if self.negated: sql = 'NOT {}'.format(sql) return sql, params def as_oracle(self, compiler, connection, template=None, **extra_context): # Oracle doesn't allow EXISTS() in the SELECT list, so wrap it with a # CASE WHEN expression. Change the template since the When expression # requires a left hand side (column) to compare against. 
sql, params = self.as_sql(compiler, connection, template, **extra_context) sql = 'CASE WHEN {} THEN 1 ELSE 0 END'.format(sql) return sql, params class OrderBy(BaseExpression): template = '%(expression)s %(ordering)s' def __init__(self, expression, descending=False, nulls_first=False, nulls_last=False): if nulls_first and nulls_last: raise ValueError('nulls_first and nulls_last are mutually exclusive') self.nulls_first = nulls_first self.nulls_last = nulls_last self.descending = descending if not hasattr(expression, 'resolve_expression'): raise ValueError('expression must be an expression type') self.expression = expression def __repr__(self): return "{}({}, descending={})".format( self.__class__.__name__, self.expression, self.descending) def set_source_expressions(self, exprs): self.expression = exprs[0] def get_source_expressions(self): return [self.expression] def as_sql(self, compiler, connection, template=None, **extra_context): if not template: if self.nulls_last: template = '%s NULLS LAST' % self.template elif self.nulls_first: template = '%s NULLS FIRST' % self.template connection.ops.check_expression_support(self) expression_sql, params = compiler.compile(self.expression) placeholders = { 'expression': expression_sql, 'ordering': 'DESC' if self.descending else 'ASC', **extra_context, } template = template or self.template params *= template.count('%(expression)s') return (template % placeholders).rstrip(), params def as_sqlite(self, compiler, connection): template = None if self.nulls_last: template = '%(expression)s IS NULL, %(expression)s %(ordering)s' elif self.nulls_first: template = '%(expression)s IS NOT NULL, %(expression)s %(ordering)s' return self.as_sql(compiler, connection, template=template) def as_mysql(self, compiler, connection): template = None if self.nulls_last: template = 'IF(ISNULL(%(expression)s),1,0), %(expression)s %(ordering)s ' elif self.nulls_first: template = 'IF(ISNULL(%(expression)s),0,1), %(expression)s %(ordering)s ' return self.as_sql(compiler, connection, template=template) def get_group_by_cols(self, alias=None): cols = [] for source in self.get_source_expressions(): cols.extend(source.get_group_by_cols()) return cols def reverse_ordering(self): self.descending = not self.descending if self.nulls_first or self.nulls_last: self.nulls_first = not self.nulls_first self.nulls_last = not self.nulls_last return self def asc(self): self.descending = False def desc(self): self.descending = True class Window(Expression): template = '%(expression)s OVER (%(window)s)' # Although the main expression may either be an aggregate or an # expression with an aggregate function, the GROUP BY that will # be introduced in the query as a result is not desired. contains_aggregate = False contains_over_clause = True filterable = False def __init__(self, expression, partition_by=None, order_by=None, frame=None, output_field=None): self.partition_by = partition_by self.order_by = order_by self.frame = frame if not getattr(expression, 'window_compatible', False): raise ValueError( "Expression '%s' isn't compatible with OVER clauses." 
% expression.__class__.__name__ ) if self.partition_by is not None: if not isinstance(self.partition_by, (tuple, list)): self.partition_by = (self.partition_by,) self.partition_by = ExpressionList(*self.partition_by) if self.order_by is not None: if isinstance(self.order_by, (list, tuple)): self.order_by = ExpressionList(*self.order_by) elif not isinstance(self.order_by, BaseExpression): raise ValueError( 'order_by must be either an Expression or a sequence of ' 'expressions.' ) super().__init__(output_field=output_field) self.source_expression = self._parse_expressions(expression)[0] def _resolve_output_field(self): return self.source_expression.output_field def get_source_expressions(self): return [self.source_expression, self.partition_by, self.order_by, self.frame] def set_source_expressions(self, exprs): self.source_expression, self.partition_by, self.order_by, self.frame = exprs def as_sql(self, compiler, connection, template=None): connection.ops.check_expression_support(self) if not connection.features.supports_over_clause: raise NotSupportedError('This backend does not support window expressions.') expr_sql, params = compiler.compile(self.source_expression) window_sql, window_params = [], [] if self.partition_by is not None: sql_expr, sql_params = self.partition_by.as_sql( compiler=compiler, connection=connection, template='PARTITION BY %(expressions)s', ) window_sql.extend(sql_expr) window_params.extend(sql_params) if self.order_by is not None: window_sql.append(' ORDER BY ') order_sql, order_params = compiler.compile(self.order_by) window_sql.extend(order_sql) window_params.extend(order_params) if self.frame: frame_sql, frame_params = compiler.compile(self.frame) window_sql.append(' ' + frame_sql) window_params.extend(frame_params) params.extend(window_params) template = template or self.template return template % { 'expression': expr_sql, 'window': ''.join(window_sql).strip() }, params def __str__(self): return '{} OVER ({}{}{})'.format( str(self.source_expression), 'PARTITION BY ' + str(self.partition_by) if self.partition_by else '', 'ORDER BY ' + str(self.order_by) if self.order_by else '', str(self.frame or ''), ) def __repr__(self): return '<%s: %s>' % (self.__class__.__name__, self) def get_group_by_cols(self, alias=None): return [] class WindowFrame(Expression): """ Model the frame clause in window expressions. There are two types of frame clauses which are subclasses, however, all processing and validation (by no means intended to be complete) is done here. Thus, providing an end for a frame is optional (the default is UNBOUNDED FOLLOWING, which is the last row in the frame). 
""" template = '%(frame_type)s BETWEEN %(start)s AND %(end)s' def __init__(self, start=None, end=None): self.start = Value(start) self.end = Value(end) def set_source_expressions(self, exprs): self.start, self.end = exprs def get_source_expressions(self): return [self.start, self.end] def as_sql(self, compiler, connection): connection.ops.check_expression_support(self) start, end = self.window_frame_start_end(connection, self.start.value, self.end.value) return self.template % { 'frame_type': self.frame_type, 'start': start, 'end': end, }, [] def __repr__(self): return '<%s: %s>' % (self.__class__.__name__, self) def get_group_by_cols(self, alias=None): return [] def __str__(self): if self.start.value is not None and self.start.value < 0: start = '%d %s' % (abs(self.start.value), connection.ops.PRECEDING) elif self.start.value is not None and self.start.value == 0: start = connection.ops.CURRENT_ROW else: start = connection.ops.UNBOUNDED_PRECEDING if self.end.value is not None and self.end.value > 0: end = '%d %s' % (self.end.value, connection.ops.FOLLOWING) elif self.end.value is not None and self.end.value == 0: end = connection.ops.CURRENT_ROW else: end = connection.ops.UNBOUNDED_FOLLOWING return self.template % { 'frame_type': self.frame_type, 'start': start, 'end': end, } def window_frame_start_end(self, connection, start, end): raise NotImplementedError('Subclasses must implement window_frame_start_end().') class RowRange(WindowFrame): frame_type = 'ROWS' def window_frame_start_end(self, connection, start, end): return connection.ops.window_frame_rows_start_end(start, end) class ValueRange(WindowFrame): frame_type = 'RANGE' def window_frame_start_end(self, connection, start, end): return connection.ops.window_frame_range_start_end(start, end)
import copy import inspect import warnings from functools import partialmethod from itertools import chain from django.apps import apps from django.conf import settings from django.core import checks from django.core.exceptions import ( NON_FIELD_ERRORS, FieldDoesNotExist, FieldError, MultipleObjectsReturned, ObjectDoesNotExist, ValidationError, ) from django.db import ( DEFAULT_DB_ALIAS, DJANGO_VERSION_PICKLE_KEY, DatabaseError, connection, connections, router, transaction, ) from django.db.models.constants import LOOKUP_SEP from django.db.models.constraints import CheckConstraint, UniqueConstraint from django.db.models.deletion import CASCADE, Collector from django.db.models.fields.related import ( ForeignObjectRel, OneToOneField, lazy_related_operation, resolve_relation, ) from django.db.models.manager import Manager from django.db.models.options import Options from django.db.models.query import Q from django.db.models.signals import ( class_prepared, post_init, post_save, pre_init, pre_save, ) from django.db.models.utils import make_model_tuple from django.utils.encoding import force_str from django.utils.text import capfirst, get_text_list from django.utils.translation import gettext_lazy as _ from django.utils.version import get_version class Deferred: def __repr__(self): return '<Deferred field>' def __str__(self): return '<Deferred field>' DEFERRED = Deferred() def subclass_exception(name, bases, module, attached_to): """ Create exception subclass. Used by ModelBase below. The exception is created in a way that allows it to be pickled, assuming that the returned exception class will be added as an attribute to the 'attached_to' class. """ return type(name, bases, { '__module__': module, '__qualname__': '%s.%s' % (attached_to.__qualname__, name), }) def _has_contribute_to_class(value): # Only call contribute_to_class() if it's bound. return not inspect.isclass(value) and hasattr(value, 'contribute_to_class') class ModelBase(type): """Metaclass for all models.""" def __new__(cls, name, bases, attrs, **kwargs): super_new = super().__new__ # Also ensure initialization is only performed for subclasses of Model # (excluding Model class itself). parents = [b for b in bases if isinstance(b, ModelBase)] if not parents: return super_new(cls, name, bases, attrs) # Create the class. module = attrs.pop('__module__') new_attrs = {'__module__': module} classcell = attrs.pop('__classcell__', None) if classcell is not None: new_attrs['__classcell__'] = classcell attr_meta = attrs.pop('Meta', None) # Pass all attrs without a (Django-specific) contribute_to_class() # method to type.__new__() so that they're properly initialized # (i.e. __set_name__()). contributable_attrs = {} for obj_name, obj in list(attrs.items()): if _has_contribute_to_class(obj): contributable_attrs[obj_name] = obj else: new_attrs[obj_name] = obj new_class = super_new(cls, name, bases, new_attrs, **kwargs) abstract = getattr(attr_meta, 'abstract', False) meta = attr_meta or getattr(new_class, 'Meta', None) base_meta = getattr(new_class, '_meta', None) app_label = None # Look for an application configuration to attach the model to. app_config = apps.get_containing_app_config(module) if getattr(meta, 'app_label', None) is None: if app_config is None: if not abstract: raise RuntimeError( "Model class %s.%s doesn't declare an explicit " "app_label and isn't in an application in " "INSTALLED_APPS." 
% (module, name) ) else: app_label = app_config.label new_class.add_to_class('_meta', Options(meta, app_label)) if not abstract: new_class.add_to_class( 'DoesNotExist', subclass_exception( 'DoesNotExist', tuple( x.DoesNotExist for x in parents if hasattr(x, '_meta') and not x._meta.abstract ) or (ObjectDoesNotExist,), module, attached_to=new_class)) new_class.add_to_class( 'MultipleObjectsReturned', subclass_exception( 'MultipleObjectsReturned', tuple( x.MultipleObjectsReturned for x in parents if hasattr(x, '_meta') and not x._meta.abstract ) or (MultipleObjectsReturned,), module, attached_to=new_class)) if base_meta and not base_meta.abstract: # Non-abstract child classes inherit some attributes from their # non-abstract parent (unless an ABC comes before it in the # method resolution order). if not hasattr(meta, 'ordering'): new_class._meta.ordering = base_meta.ordering if not hasattr(meta, 'get_latest_by'): new_class._meta.get_latest_by = base_meta.get_latest_by is_proxy = new_class._meta.proxy # If the model is a proxy, ensure that the base class # hasn't been swapped out. if is_proxy and base_meta and base_meta.swapped: raise TypeError("%s cannot proxy the swapped model '%s'." % (name, base_meta.swapped)) # Add remaining attributes (those with a contribute_to_class() method) # to the class. for obj_name, obj in contributable_attrs.items(): new_class.add_to_class(obj_name, obj) # All the fields of any type declared on this model new_fields = chain( new_class._meta.local_fields, new_class._meta.local_many_to_many, new_class._meta.private_fields ) field_names = {f.name for f in new_fields} # Basic setup for proxy models. if is_proxy: base = None for parent in [kls for kls in parents if hasattr(kls, '_meta')]: if parent._meta.abstract: if parent._meta.fields: raise TypeError( "Abstract base class containing model fields not " "permitted for proxy model '%s'." % name ) else: continue if base is None: base = parent elif parent._meta.concrete_model is not base._meta.concrete_model: raise TypeError("Proxy model '%s' has more than one non-abstract model base class." % name) if base is None: raise TypeError("Proxy model '%s' has no non-abstract model base class." % name) new_class._meta.setup_proxy(base) new_class._meta.concrete_model = base._meta.concrete_model else: new_class._meta.concrete_model = new_class # Collect the parent links for multi-table inheritance. parent_links = {} for base in reversed([new_class] + parents): # Conceptually equivalent to `if base is Model`. if not hasattr(base, '_meta'): continue # Skip concrete parent classes. if base != new_class and not base._meta.abstract: continue # Locate OneToOneField instances. for field in base._meta.local_fields: if isinstance(field, OneToOneField): related = resolve_relation(new_class, field.remote_field.model) parent_links[make_model_tuple(related)] = field # Track fields inherited from base models. inherited_attributes = set() # Do the appropriate setup for any model parents. for base in new_class.mro(): if base not in parents or not hasattr(base, '_meta'): # Things without _meta aren't functional models, so they're # uninteresting parents. inherited_attributes.update(base.__dict__) continue parent_fields = base._meta.local_fields + base._meta.local_many_to_many if not base._meta.abstract: # Check for clashes between locally declared fields and those # on the base classes. 
for field in parent_fields: if field.name in field_names: raise FieldError( 'Local field %r in class %r clashes with field of ' 'the same name from base class %r.' % ( field.name, name, base.__name__, ) ) else: inherited_attributes.add(field.name) # Concrete classes... base = base._meta.concrete_model base_key = make_model_tuple(base) if base_key in parent_links: field = parent_links[base_key] elif not is_proxy: attr_name = '%s_ptr' % base._meta.model_name field = OneToOneField( base, on_delete=CASCADE, name=attr_name, auto_created=True, parent_link=True, ) if attr_name in field_names: raise FieldError( "Auto-generated field '%s' in class %r for " "parent_link to base class %r clashes with " "declared field of the same name." % ( attr_name, name, base.__name__, ) ) # Only add the ptr field if it's not already present; # e.g. migrations will already have it specified if not hasattr(new_class, attr_name): new_class.add_to_class(attr_name, field) else: field = None new_class._meta.parents[base] = field else: base_parents = base._meta.parents.copy() # Add fields from abstract base class if it wasn't overridden. for field in parent_fields: if (field.name not in field_names and field.name not in new_class.__dict__ and field.name not in inherited_attributes): new_field = copy.deepcopy(field) new_class.add_to_class(field.name, new_field) # Replace parent links defined on this base by the new # field. It will be appropriately resolved if required. if field.one_to_one: for parent, parent_link in base_parents.items(): if field == parent_link: base_parents[parent] = new_field # Pass any non-abstract parent classes onto child. new_class._meta.parents.update(base_parents) # Inherit private fields (like GenericForeignKey) from the parent # class for field in base._meta.private_fields: if field.name in field_names: if not base._meta.abstract: raise FieldError( 'Local field %r in class %r clashes with field of ' 'the same name from base class %r.' % ( field.name, name, base.__name__, ) ) else: field = copy.deepcopy(field) if not base._meta.abstract: field.mti_inherited = True new_class.add_to_class(field.name, field) # Copy indexes so that index names are unique when models extend an # abstract model. new_class._meta.indexes = [copy.deepcopy(idx) for idx in new_class._meta.indexes] if abstract: # Abstract base models can't be instantiated and don't appear in # the list of models for an app. We do the final setup for them a # little differently from normal models. attr_meta.abstract = False new_class.Meta = attr_meta return new_class new_class._prepare() new_class._meta.apps.register_model(new_class._meta.app_label, new_class) return new_class def add_to_class(cls, name, value): if _has_contribute_to_class(value): value.contribute_to_class(cls, name) else: setattr(cls, name, value) def _prepare(cls): """Create some methods once self._meta has been populated.""" opts = cls._meta opts._prepare(cls) if opts.order_with_respect_to: cls.get_next_in_order = partialmethod(cls._get_next_or_previous_in_order, is_next=True) cls.get_previous_in_order = partialmethod(cls._get_next_or_previous_in_order, is_next=False) # Defer creating accessors on the foreign class until it has been # created and registered. If remote_field is None, we're ordering # with respect to a GenericForeignKey and don't know what the # foreign class is - we'll add those accessors later in # contribute_to_class(). 
if opts.order_with_respect_to.remote_field: wrt = opts.order_with_respect_to remote = wrt.remote_field.model lazy_related_operation(make_foreign_order_accessors, cls, remote) # Give the class a docstring -- its definition. if cls.__doc__ is None: cls.__doc__ = "%s(%s)" % (cls.__name__, ", ".join(f.name for f in opts.fields)) get_absolute_url_override = settings.ABSOLUTE_URL_OVERRIDES.get(opts.label_lower) if get_absolute_url_override: setattr(cls, 'get_absolute_url', get_absolute_url_override) if not opts.managers: if any(f.name == 'objects' for f in opts.fields): raise ValueError( "Model %s must specify a custom Manager, because it has a " "field named 'objects'." % cls.__name__ ) manager = Manager() manager.auto_created = True cls.add_to_class('objects', manager) # Set the name of _meta.indexes. This can't be done in # Options.contribute_to_class() because fields haven't been added to # the model at that point. for index in cls._meta.indexes: if not index.name: index.set_name_with_model(cls) class_prepared.send(sender=cls) @property def _base_manager(cls): return cls._meta.base_manager @property def _default_manager(cls): return cls._meta.default_manager class ModelStateFieldsCacheDescriptor: def __get__(self, instance, cls=None): if instance is None: return self res = instance.fields_cache = {} return res class ModelState: """Store model instance state.""" db = None # If true, uniqueness validation checks will consider this a new, unsaved # object. Necessary for correct validation of new instances of objects with # explicit (non-auto) PKs. This impacts validation only; it has no effect # on the actual save. adding = True fields_cache = ModelStateFieldsCacheDescriptor() class Model(metaclass=ModelBase): def __init__(self, *args, **kwargs): # Alias some things as locals to avoid repeat global lookups cls = self.__class__ opts = self._meta _setattr = setattr _DEFERRED = DEFERRED pre_init.send(sender=cls, args=args, kwargs=kwargs) # Set up the storage for instance state self._state = ModelState() # There is a rather weird disparity here; if kwargs, it's set, then args # overrides it. It should be one or the other; don't duplicate the work # The reason for the kwargs check is that standard iterator passes in by # args, and instantiation for iteration is 33% faster. if len(args) > len(opts.concrete_fields): # Daft, but matches old exception sans the err msg. raise IndexError("Number of args exceeds number of fields") if not kwargs: fields_iter = iter(opts.concrete_fields) # The ordering of the zip calls matter - zip throws StopIteration # when an iter throws it. So if the first iter throws it, the second # is *not* consumed. We rely on this, so don't change the order # without changing the logic. for val, field in zip(args, fields_iter): if val is _DEFERRED: continue _setattr(self, field.attname, val) else: # Slower, kwargs-ready version. fields_iter = iter(opts.fields) for val, field in zip(args, fields_iter): if val is _DEFERRED: continue _setattr(self, field.attname, val) kwargs.pop(field.name, None) # Now we're left with the unprocessed fields that *must* come from # keywords, or default. for field in fields_iter: is_related_object = False # Virtual field if field.attname not in kwargs and field.column is None: continue if kwargs: if isinstance(field.remote_field, ForeignObjectRel): try: # Assume object instance was passed in. rel_obj = kwargs.pop(field.name) is_related_object = True except KeyError: try: # Object instance wasn't passed in -- must be an ID. 
val = kwargs.pop(field.attname) except KeyError: val = field.get_default() else: try: val = kwargs.pop(field.attname) except KeyError: # This is done with an exception rather than the # default argument on pop because we don't want # get_default() to be evaluated, and then not used. # Refs #12057. val = field.get_default() else: val = field.get_default() if is_related_object: # If we are passed a related instance, set it using the # field.name instead of field.attname (e.g. "user" instead of # "user_id") so that the object gets properly cached (and type # checked) by the RelatedObjectDescriptor. if rel_obj is not _DEFERRED: _setattr(self, field.name, rel_obj) else: if val is not _DEFERRED: _setattr(self, field.attname, val) if kwargs: property_names = opts._property_names for prop in tuple(kwargs): try: # Any remaining kwargs must correspond to properties or # virtual fields. if prop in property_names or opts.get_field(prop): if kwargs[prop] is not _DEFERRED: _setattr(self, prop, kwargs[prop]) del kwargs[prop] except (AttributeError, FieldDoesNotExist): pass for kwarg in kwargs: raise TypeError("%s() got an unexpected keyword argument '%s'" % (cls.__name__, kwarg)) super().__init__() post_init.send(sender=cls, instance=self) @classmethod def from_db(cls, db, field_names, values): if len(values) != len(cls._meta.concrete_fields): values_iter = iter(values) values = [ next(values_iter) if f.attname in field_names else DEFERRED for f in cls._meta.concrete_fields ] new = cls(*values) new._state.adding = False new._state.db = db return new def __repr__(self): return '<%s: %s>' % (self.__class__.__name__, self) def __str__(self): return '%s object (%s)' % (self.__class__.__name__, self.pk) def __eq__(self, other): if not isinstance(other, Model): return False if self._meta.concrete_model != other._meta.concrete_model: return False my_pk = self.pk if my_pk is None: return self is other return my_pk == other.pk def __hash__(self): if self.pk is None: raise TypeError("Model instances without primary key value are unhashable") return hash(self.pk) def __reduce__(self): data = self.__getstate__() data[DJANGO_VERSION_PICKLE_KEY] = get_version() class_id = self._meta.app_label, self._meta.object_name return model_unpickle, (class_id,), data def __getstate__(self): """Hook to allow choosing the attributes to pickle.""" return self.__dict__ def __setstate__(self, state): msg = None pickled_version = state.get(DJANGO_VERSION_PICKLE_KEY) if pickled_version: current_version = get_version() if current_version != pickled_version: msg = ( "Pickled model instance's Django version %s does not match " "the current version %s." % (pickled_version, current_version) ) else: msg = "Pickled model instance's Django version is not specified." if msg: warnings.warn(msg, RuntimeWarning, stacklevel=2) self.__dict__.update(state) def _get_pk_val(self, meta=None): meta = meta or self._meta return getattr(self, meta.pk.attname) def _set_pk_val(self, value): return setattr(self, self._meta.pk.attname, value) pk = property(_get_pk_val, _set_pk_val) def get_deferred_fields(self): """ Return a set containing names of deferred fields for this instance. """ return { f.attname for f in self._meta.concrete_fields if f.attname not in self.__dict__ } def refresh_from_db(self, using=None, fields=None): """ Reload field values from the database. By default, the reloading happens from the database this instance was loaded from, or by the read router if this instance wasn't loaded from any database. 
The using parameter will override the default. Fields can be used to specify which fields to reload. The fields should be an iterable of field attnames. If fields is None, then all non-deferred fields are reloaded. When accessing deferred fields of an instance, the deferred loading of the field will call this method. """ if fields is None: self._prefetched_objects_cache = {} else: prefetched_objects_cache = getattr(self, '_prefetched_objects_cache', ()) for field in fields: if field in prefetched_objects_cache: del prefetched_objects_cache[field] fields.remove(field) if not fields: return if any(LOOKUP_SEP in f for f in fields): raise ValueError( 'Found "%s" in fields argument. Relations and transforms ' 'are not allowed in fields.' % LOOKUP_SEP) hints = {'instance': self} db_instance_qs = self.__class__._base_manager.db_manager(using, hints=hints).filter(pk=self.pk) # Use provided fields, if not set then reload all non-deferred fields. deferred_fields = self.get_deferred_fields() if fields is not None: fields = list(fields) db_instance_qs = db_instance_qs.only(*fields) elif deferred_fields: fields = [f.attname for f in self._meta.concrete_fields if f.attname not in deferred_fields] db_instance_qs = db_instance_qs.only(*fields) db_instance = db_instance_qs.get() non_loaded_fields = db_instance.get_deferred_fields() for field in self._meta.concrete_fields: if field.attname in non_loaded_fields: # This field wasn't refreshed - skip ahead. continue setattr(self, field.attname, getattr(db_instance, field.attname)) # Clear cached foreign keys. if field.is_relation and field.is_cached(self): field.delete_cached_value(self) # Clear cached relations. for field in self._meta.related_objects: if field.is_cached(self): field.delete_cached_value(self) self._state.db = db_instance._state.db def serializable_value(self, field_name): """ Return the value of the field name for this instance. If the field is a foreign key, return the id value instead of the object. If there's no Field object with this name on the model, return the model attribute's value. Used to serialize a field's value (in the serializer, or form output, for example). Normally, you would just access the attribute directly and not use this method. """ try: field = self._meta.get_field(field_name) except FieldDoesNotExist: return getattr(self, field_name) return getattr(self, field.attname) def save(self, force_insert=False, force_update=False, using=None, update_fields=None): """ Save the current instance. Override this in a subclass if you want to control the saving process. The 'force_insert' and 'force_update' parameters can be used to insist that the "save" must be an SQL insert or update (or equivalent for non-SQL backends), respectively. Normally, they should not be set. """ # Ensure that a model instance without a PK hasn't been assigned to # a ForeignKey or OneToOneField on this model. If the field is # nullable, allowing the save() would result in silent data loss. for field in self._meta.concrete_fields: # If the related field isn't cached, then an instance hasn't # been assigned and there's no need to worry about this check. if field.is_relation and field.is_cached(self): obj = getattr(self, field.name, None) if not obj: continue # A pk may have been assigned manually to a model instance not # saved to the database (or auto-generated in a case like # UUIDField), but we allow the save to proceed and rely on the # database to raise an IntegrityError if applicable. 
If # constraints aren't supported by the database, there's the # unavoidable risk of data corruption. if obj.pk is None: # Remove the object from a related instance cache. if not field.remote_field.multiple: field.remote_field.delete_cached_value(obj) raise ValueError( "save() prohibited to prevent data loss due to " "unsaved related object '%s'." % field.name ) elif getattr(self, field.attname) is None: # Use pk from related object if it has been saved after # an assignment. setattr(self, field.attname, obj.pk) # If the relationship's pk/to_field was changed, clear the # cached relationship. if getattr(obj, field.target_field.attname) != getattr(self, field.attname): field.delete_cached_value(self) using = using or router.db_for_write(self.__class__, instance=self) if force_insert and (force_update or update_fields): raise ValueError("Cannot force both insert and updating in model saving.") deferred_fields = self.get_deferred_fields() if update_fields is not None: # If update_fields is empty, skip the save. We do also check for # no-op saves later on for inheritance cases. This bailout is # still needed for skipping signal sending. if not update_fields: return update_fields = frozenset(update_fields) field_names = set() for field in self._meta.fields: if not field.primary_key: field_names.add(field.name) if field.name != field.attname: field_names.add(field.attname) non_model_fields = update_fields.difference(field_names) if non_model_fields: raise ValueError("The following fields do not exist in this " "model or are m2m fields: %s" % ', '.join(non_model_fields)) # If saving to the same database, and this model is deferred, then # automatically do an "update_fields" save on the loaded fields. elif not force_insert and deferred_fields and using == self._state.db: field_names = set() for field in self._meta.concrete_fields: if not field.primary_key and not hasattr(field, 'through'): field_names.add(field.attname) loaded_fields = field_names.difference(deferred_fields) if loaded_fields: update_fields = frozenset(loaded_fields) self.save_base(using=using, force_insert=force_insert, force_update=force_update, update_fields=update_fields) save.alters_data = True def save_base(self, raw=False, force_insert=False, force_update=False, using=None, update_fields=None): """ Handle the parts of saving which should be done only once per save, yet need to be done in raw saves, too. This includes some sanity checks and signal sending. The 'raw' argument is telling save_base not to save any parent models and not to do any changes to the values before save. This is used by fixture loading. """ using = using or router.db_for_write(self.__class__, instance=self) assert not (force_insert and (force_update or update_fields)) assert update_fields is None or update_fields cls = origin = self.__class__ # Skip proxies, but keep the origin as the proxy model. if cls._meta.proxy: cls = cls._meta.concrete_model meta = cls._meta if not meta.auto_created: pre_save.send( sender=origin, instance=self, raw=raw, using=using, update_fields=update_fields, ) # A transaction isn't needed if one query is issued. 
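        # Saving a model with parents (multi-table inheritance) issues
        # several queries, so those writes are wrapped in a transaction;
        # a single-table save skips the transaction and merely marks an
        # enclosing atomic block for rollback if the query fails.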
if meta.parents: context_manager = transaction.atomic(using=using, savepoint=False) else: context_manager = transaction.mark_for_rollback_on_error(using=using) with context_manager: parent_inserted = False if not raw: parent_inserted = self._save_parents(cls, using, update_fields) updated = self._save_table( raw, cls, force_insert or parent_inserted, force_update, using, update_fields, ) # Store the database on which the object was saved self._state.db = using # Once saved, this is no longer a to-be-added instance. self._state.adding = False # Signal that the save is complete if not meta.auto_created: post_save.send( sender=origin, instance=self, created=(not updated), update_fields=update_fields, raw=raw, using=using, ) save_base.alters_data = True def _save_parents(self, cls, using, update_fields): """Save all the parents of cls using values from self.""" meta = cls._meta inserted = False for parent, field in meta.parents.items(): # Make sure the link fields are synced between parent and self. if (field and getattr(self, parent._meta.pk.attname) is None and getattr(self, field.attname) is not None): setattr(self, parent._meta.pk.attname, getattr(self, field.attname)) parent_inserted = self._save_parents(cls=parent, using=using, update_fields=update_fields) updated = self._save_table( cls=parent, using=using, update_fields=update_fields, force_insert=parent_inserted, ) if not updated: inserted = True # Set the parent's PK value to self. if field: setattr(self, field.attname, self._get_pk_val(parent._meta)) # Since we didn't have an instance of the parent handy set # attname directly, bypassing the descriptor. Invalidate # the related object cache, in case it's been accidentally # populated. A fresh instance will be re-built from the # database if necessary. if field.is_cached(self): field.delete_cached_value(self) return inserted def _save_table(self, raw=False, cls=None, force_insert=False, force_update=False, using=None, update_fields=None): """ Do the heavy-lifting involved in saving. Update or insert the data for a single table. """ meta = cls._meta non_pks = [f for f in meta.local_concrete_fields if not f.primary_key] if update_fields: non_pks = [f for f in non_pks if f.name in update_fields or f.attname in update_fields] pk_val = self._get_pk_val(meta) if pk_val is None: pk_val = meta.pk.get_pk_value_on_save(self) setattr(self, meta.pk.attname, pk_val) pk_set = pk_val is not None if not pk_set and (force_update or update_fields): raise ValueError("Cannot force an update in save() with no primary key.") updated = False # If possible, try an UPDATE. If that doesn't update anything, do an INSERT. 
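        # An instance saved with a preset pk therefore costs one UPDATE
        # probe; only when that UPDATE matches zero rows does control fall
        # through to the INSERT branch below.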
if pk_set and not force_insert: base_qs = cls._base_manager.using(using) values = [(f, None, (getattr(self, f.attname) if raw else f.pre_save(self, False))) for f in non_pks] forced_update = update_fields or force_update updated = self._do_update(base_qs, using, pk_val, values, update_fields, forced_update) if force_update and not updated: raise DatabaseError("Forced update did not affect any rows.") if update_fields and not updated: raise DatabaseError("Save with update_fields did not affect any rows.") if not updated: if meta.order_with_respect_to: # If this is a model with an order_with_respect_to # autopopulate the _order field field = meta.order_with_respect_to filter_args = field.get_filter_kwargs_for_object(self) order_value = cls._base_manager.using(using).filter(**filter_args).count() self._order = order_value fields = meta.local_concrete_fields if not pk_set: fields = [f for f in fields if f is not meta.auto_field] update_pk = meta.auto_field and not pk_set result = self._do_insert(cls._base_manager, using, fields, update_pk, raw) if update_pk: setattr(self, meta.pk.attname, result) return updated def _do_update(self, base_qs, using, pk_val, values, update_fields, forced_update): """ Try to update the model. Return True if the model was updated (if an update query was done and a matching row was found in the DB). """ filtered = base_qs.filter(pk=pk_val) if not values: # We can end up here when saving a model in inheritance chain where # update_fields doesn't target any field in current model. In that # case we just say the update succeeded. Another case ending up here # is a model with just PK - in that case check that the PK still # exists. return update_fields is not None or filtered.exists() if self._meta.select_on_save and not forced_update: return ( filtered.exists() and # It may happen that the object is deleted from the DB right after # this check, causing the subsequent UPDATE to return zero matching # rows. The same result can occur in some rare cases when the # database returns zero despite the UPDATE being executed # successfully (a row is matched and updated). In order to # distinguish these two cases, the object's existence in the # database is again checked for if the UPDATE query returns 0. (filtered._update(values) > 0 or filtered.exists()) ) return filtered._update(values) > 0 def _do_insert(self, manager, using, fields, update_pk, raw): """ Do an INSERT. If update_pk is defined then this method should return the new pk for the model. """ return manager._insert([self], fields=fields, return_id=update_pk, using=using, raw=raw) def delete(self, using=None, keep_parents=False): using = using or router.db_for_write(self.__class__, instance=self) assert self.pk is not None, ( "%s object can't be deleted because its %s attribute is set to None." % (self._meta.object_name, self._meta.pk.attname) ) collector = Collector(using=using) collector.collect([self], keep_parents=keep_parents) return collector.delete() delete.alters_data = True def _get_FIELD_display(self, field): value = getattr(self, field.attname) # force_str() to coerce lazy strings. 
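        # dict(field.flatchoices) maps raw values to display labels, e.g.
        # {'S': 'Small', 'M': 'Medium'}; values absent from the choices
        # fall back to the raw value itself.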
return force_str(dict(field.flatchoices).get(value, value), strings_only=True) def _get_next_or_previous_by_FIELD(self, field, is_next, **kwargs): if not self.pk: raise ValueError("get_next/get_previous cannot be used on unsaved objects.") op = 'gt' if is_next else 'lt' order = '' if is_next else '-' param = getattr(self, field.attname) q = Q(**{'%s__%s' % (field.name, op): param}) q = q | Q(**{field.name: param, 'pk__%s' % op: self.pk}) qs = self.__class__._default_manager.using(self._state.db).filter(**kwargs).filter(q).order_by( '%s%s' % (order, field.name), '%spk' % order ) try: return qs[0] except IndexError: raise self.DoesNotExist("%s matching query does not exist." % self.__class__._meta.object_name) def _get_next_or_previous_in_order(self, is_next): cachename = "__%s_order_cache" % is_next if not hasattr(self, cachename): op = 'gt' if is_next else 'lt' order = '_order' if is_next else '-_order' order_field = self._meta.order_with_respect_to filter_args = order_field.get_filter_kwargs_for_object(self) obj = self.__class__._default_manager.filter(**filter_args).filter(**{ '_order__%s' % op: self.__class__._default_manager.values('_order').filter(**{ self._meta.pk.name: self.pk }) }).order_by(order)[:1].get() setattr(self, cachename, obj) return getattr(self, cachename) def prepare_database_save(self, field): if self.pk is None: raise ValueError("Unsaved model instance %r cannot be used in an ORM query." % self) return getattr(self, field.remote_field.get_related_field().attname) def clean(self): """ Hook for doing any extra model-wide validation after clean() has been called on every field by self.clean_fields. Any ValidationError raised by this method will not be associated with a particular field; it will have a special-case association with the field defined by NON_FIELD_ERRORS. """ pass def validate_unique(self, exclude=None): """ Check unique constraints on the model and raise ValidationError if any failed. """ unique_checks, date_checks = self._get_unique_checks(exclude=exclude) errors = self._perform_unique_checks(unique_checks) date_errors = self._perform_date_checks(date_checks) for k, v in date_errors.items(): errors.setdefault(k, []).extend(v) if errors: raise ValidationError(errors) def _get_unique_checks(self, exclude=None): """ Return a list of checks to perform. Since validate_unique() could be called from a ModelForm, some fields may have been excluded; we can't perform a unique check on a model that is missing fields involved in that check. Fields that did not validate should also be excluded, but they need to be passed in via the exclude argument. """ if exclude is None: exclude = [] unique_checks = [] unique_togethers = [(self.__class__, self._meta.unique_together)] constraints = [(self.__class__, self._meta.constraints)] for parent_class in self._meta.get_parent_list(): if parent_class._meta.unique_together: unique_togethers.append((parent_class, parent_class._meta.unique_together)) if parent_class._meta.constraints: constraints.append((parent_class, parent_class._meta.constraints)) for model_class, unique_together in unique_togethers: for check in unique_together: if not any(name in exclude for name in check): # Add the check if the field isn't excluded. unique_checks.append((model_class, tuple(check))) for model_class, model_constraints in constraints: for constraint in model_constraints: if (isinstance(constraint, UniqueConstraint) and # Partial unique constraints can't be validated. 
constraint.condition is None and not any(name in exclude for name in constraint.fields)): unique_checks.append((model_class, constraint.fields)) # These are checks for the unique_for_<date/year/month>. date_checks = [] # Gather a list of checks for fields declared as unique and add them to # the list of checks. fields_with_class = [(self.__class__, self._meta.local_fields)] for parent_class in self._meta.get_parent_list(): fields_with_class.append((parent_class, parent_class._meta.local_fields)) for model_class, fields in fields_with_class: for f in fields: name = f.name if name in exclude: continue if f.unique: unique_checks.append((model_class, (name,))) if f.unique_for_date and f.unique_for_date not in exclude: date_checks.append((model_class, 'date', name, f.unique_for_date)) if f.unique_for_year and f.unique_for_year not in exclude: date_checks.append((model_class, 'year', name, f.unique_for_year)) if f.unique_for_month and f.unique_for_month not in exclude: date_checks.append((model_class, 'month', name, f.unique_for_month)) return unique_checks, date_checks def _perform_unique_checks(self, unique_checks): errors = {} for model_class, unique_check in unique_checks: # Try to look up an existing object with the same values as this # object's values for all the unique field. lookup_kwargs = {} for field_name in unique_check: f = self._meta.get_field(field_name) lookup_value = getattr(self, f.attname) # TODO: Handle multiple backends with different feature flags. if (lookup_value is None or (lookup_value == '' and connection.features.interprets_empty_strings_as_nulls)): # no value, skip the lookup continue if f.primary_key and not self._state.adding: # no need to check for unique primary key when editing continue lookup_kwargs[str(field_name)] = lookup_value # some fields were skipped, no reason to do the check if len(unique_check) != len(lookup_kwargs): continue qs = model_class._default_manager.filter(**lookup_kwargs) # Exclude the current object from the query if we are editing an # instance (as opposed to creating a new one) # Note that we need to use the pk as defined by model_class, not # self.pk. These can be different fields because model inheritance # allows single model to have effectively multiple primary keys. # Refs #17615. 
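            # With multi-table inheritance the child and each parent table
            # carry their own pk column, so the row to exclude has to be
            # identified by the pk defined on model_class itself.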
model_class_pk = self._get_pk_val(model_class._meta) if not self._state.adding and model_class_pk is not None: qs = qs.exclude(pk=model_class_pk) if qs.exists(): if len(unique_check) == 1: key = unique_check[0] else: key = NON_FIELD_ERRORS errors.setdefault(key, []).append(self.unique_error_message(model_class, unique_check)) return errors def _perform_date_checks(self, date_checks): errors = {} for model_class, lookup_type, field, unique_for in date_checks: lookup_kwargs = {} # there's a ticket to add a date lookup, we can remove this special # case if that makes it's way in date = getattr(self, unique_for) if date is None: continue if lookup_type == 'date': lookup_kwargs['%s__day' % unique_for] = date.day lookup_kwargs['%s__month' % unique_for] = date.month lookup_kwargs['%s__year' % unique_for] = date.year else: lookup_kwargs['%s__%s' % (unique_for, lookup_type)] = getattr(date, lookup_type) lookup_kwargs[field] = getattr(self, field) qs = model_class._default_manager.filter(**lookup_kwargs) # Exclude the current object from the query if we are editing an # instance (as opposed to creating a new one) if not self._state.adding and self.pk is not None: qs = qs.exclude(pk=self.pk) if qs.exists(): errors.setdefault(field, []).append( self.date_error_message(lookup_type, field, unique_for) ) return errors def date_error_message(self, lookup_type, field_name, unique_for): opts = self._meta field = opts.get_field(field_name) return ValidationError( message=field.error_messages['unique_for_date'], code='unique_for_date', params={ 'model': self, 'model_name': capfirst(opts.verbose_name), 'lookup_type': lookup_type, 'field': field_name, 'field_label': capfirst(field.verbose_name), 'date_field': unique_for, 'date_field_label': capfirst(opts.get_field(unique_for).verbose_name), } ) def unique_error_message(self, model_class, unique_check): opts = model_class._meta params = { 'model': self, 'model_class': model_class, 'model_name': capfirst(opts.verbose_name), 'unique_check': unique_check, } # A unique field if len(unique_check) == 1: field = opts.get_field(unique_check[0]) params['field_label'] = capfirst(field.verbose_name) return ValidationError( message=field.error_messages['unique'], code='unique', params=params, ) # unique_together else: field_labels = [capfirst(opts.get_field(f).verbose_name) for f in unique_check] params['field_labels'] = get_text_list(field_labels, _('and')) return ValidationError( message=_("%(model_name)s with this %(field_labels)s already exists."), code='unique_together', params=params, ) def full_clean(self, exclude=None, validate_unique=True): """ Call clean_fields(), clean(), and validate_unique() on the model. Raise a ValidationError for any errors that occur. """ errors = {} if exclude is None: exclude = [] else: exclude = list(exclude) try: self.clean_fields(exclude=exclude) except ValidationError as e: errors = e.update_error_dict(errors) # Form.clean() is run even if other validation fails, so do the # same with Model.clean() for consistency. try: self.clean() except ValidationError as e: errors = e.update_error_dict(errors) # Run unique checks, but only for fields that passed validation. 
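        # Field names that already produced an error are appended to
        # exclude below, so the uniqueness queries never run against
        # values that are known to be invalid.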
if validate_unique: for name in errors: if name != NON_FIELD_ERRORS and name not in exclude: exclude.append(name) try: self.validate_unique(exclude=exclude) except ValidationError as e: errors = e.update_error_dict(errors) if errors: raise ValidationError(errors) def clean_fields(self, exclude=None): """ Clean all fields and raise a ValidationError containing a dict of all validation errors if any occur. """ if exclude is None: exclude = [] errors = {} for f in self._meta.fields: if f.name in exclude: continue # Skip validation for empty fields with blank=True. The developer # is responsible for making sure they have a valid value. raw_value = getattr(self, f.attname) if f.blank and raw_value in f.empty_values: continue try: setattr(self, f.attname, f.clean(raw_value, self)) except ValidationError as e: errors[f.name] = e.error_list if errors: raise ValidationError(errors) @classmethod def check(cls, **kwargs): errors = [*cls._check_swappable(), *cls._check_model(), *cls._check_managers(**kwargs)] if not cls._meta.swapped: errors += [ *cls._check_fields(**kwargs), *cls._check_m2m_through_same_relationship(), *cls._check_long_column_names(), ] clash_errors = ( *cls._check_id_field(), *cls._check_field_name_clashes(), *cls._check_model_name_db_lookup_clashes(), *cls._check_property_name_related_field_accessor_clashes(), *cls._check_single_primary_key(), ) errors.extend(clash_errors) # If there are field name clashes, hide consequent column name # clashes. if not clash_errors: errors.extend(cls._check_column_name_clashes()) errors += [ *cls._check_index_together(), *cls._check_unique_together(), *cls._check_indexes(), *cls._check_ordering(), *cls._check_constraints(), ] return errors @classmethod def _check_swappable(cls): """Check if the swapped model exists.""" errors = [] if cls._meta.swapped: try: apps.get_model(cls._meta.swapped) except ValueError: errors.append( checks.Error( "'%s' is not of the form 'app_label.app_name'." % cls._meta.swappable, id='models.E001', ) ) except LookupError: app_label, model_name = cls._meta.swapped.split('.') errors.append( checks.Error( "'%s' references '%s.%s', which has not been " "installed, or is abstract." % ( cls._meta.swappable, app_label, model_name ), id='models.E002', ) ) return errors @classmethod def _check_model(cls): errors = [] if cls._meta.proxy: if cls._meta.local_fields or cls._meta.local_many_to_many: errors.append( checks.Error( "Proxy model '%s' contains model fields." % cls.__name__, id='models.E017', ) ) return errors @classmethod def _check_managers(cls, **kwargs): """Perform all manager checks.""" errors = [] for manager in cls._meta.managers: errors.extend(manager.check(**kwargs)) return errors @classmethod def _check_fields(cls, **kwargs): """Perform all field checks.""" errors = [] for field in cls._meta.local_fields: errors.extend(field.check(**kwargs)) for field in cls._meta.local_many_to_many: errors.extend(field.check(from_model=cls, **kwargs)) return errors @classmethod def _check_m2m_through_same_relationship(cls): """ Check if no relationship model is used by more than one m2m field. """ errors = [] seen_intermediary_signatures = [] fields = cls._meta.local_many_to_many # Skip when the target model wasn't found. fields = (f for f in fields if isinstance(f.remote_field.model, ModelBase)) # Skip when the relationship model wasn't found. 
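        # (An unresolved through model is still a string reference at this
        # point and cannot be compared against other signatures.)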
fields = (f for f in fields if isinstance(f.remote_field.through, ModelBase)) for f in fields: signature = (f.remote_field.model, cls, f.remote_field.through, f.remote_field.through_fields) if signature in seen_intermediary_signatures: errors.append( checks.Error( "The model has two identical many-to-many relations " "through the intermediate model '%s'." % f.remote_field.through._meta.label, obj=cls, id='models.E003', ) ) else: seen_intermediary_signatures.append(signature) return errors @classmethod def _check_id_field(cls): """Check if `id` field is a primary key.""" fields = [f for f in cls._meta.local_fields if f.name == 'id' and f != cls._meta.pk] # fields is empty or consists of the invalid "id" field if fields and not fields[0].primary_key and cls._meta.pk.name == 'id': return [ checks.Error( "'id' can only be used as a field name if the field also " "sets 'primary_key=True'.", obj=cls, id='models.E004', ) ] else: return [] @classmethod def _check_field_name_clashes(cls): """Forbid field shadowing in multi-table inheritance.""" errors = [] used_fields = {} # name or attname -> field # Check that multi-inheritance doesn't cause field name shadowing. for parent in cls._meta.get_parent_list(): for f in parent._meta.local_fields: clash = used_fields.get(f.name) or used_fields.get(f.attname) or None if clash: errors.append( checks.Error( "The field '%s' from parent model " "'%s' clashes with the field '%s' " "from parent model '%s'." % ( clash.name, clash.model._meta, f.name, f.model._meta ), obj=cls, id='models.E005', ) ) used_fields[f.name] = f used_fields[f.attname] = f # Check that fields defined in the model don't clash with fields from # parents, including auto-generated fields like multi-table inheritance # child accessors. for parent in cls._meta.get_parent_list(): for f in parent._meta.get_fields(): if f not in used_fields: used_fields[f.name] = f for f in cls._meta.local_fields: clash = used_fields.get(f.name) or used_fields.get(f.attname) or None # Note that we may detect clash between user-defined non-unique # field "id" and automatically added unique field "id", both # defined at the same model. This special case is considered in # _check_id_field and here we ignore it. id_conflict = f.name == "id" and clash and clash.name == "id" and clash.model == cls if clash and not id_conflict: errors.append( checks.Error( "The field '%s' clashes with the field '%s' " "from model '%s'." % ( f.name, clash.name, clash.model._meta ), obj=f, id='models.E006', ) ) used_fields[f.name] = f used_fields[f.attname] = f return errors @classmethod def _check_column_name_clashes(cls): # Store a list of column names which have already been used by other fields. used_column_names = [] errors = [] for f in cls._meta.local_fields: _, column_name = f.get_attname_column() # Ensure the column name is not already in use. if column_name and column_name in used_column_names: errors.append( checks.Error( "Field '%s' has column name '%s' that is used by " "another field." % (f.name, column_name), hint="Specify a 'db_column' for the field.", obj=cls, id='models.E007' ) ) else: used_column_names.append(column_name) return errors @classmethod def _check_model_name_db_lookup_clashes(cls): errors = [] model_name = cls.__name__ if model_name.startswith('_') or model_name.endswith('_'): errors.append( checks.Error( "The model name '%s' cannot start or end with an underscore " "as it collides with the query lookup syntax." 
% model_name, obj=cls, id='models.E023' ) ) elif LOOKUP_SEP in model_name: errors.append( checks.Error( "The model name '%s' cannot contain double underscores as " "it collides with the query lookup syntax." % model_name, obj=cls, id='models.E024' ) ) return errors @classmethod def _check_property_name_related_field_accessor_clashes(cls): errors = [] property_names = cls._meta._property_names related_field_accessors = ( f.get_attname() for f in cls._meta._get_fields(reverse=False) if f.is_relation and f.related_model is not None ) for accessor in related_field_accessors: if accessor in property_names: errors.append( checks.Error( "The property '%s' clashes with a related field " "accessor." % accessor, obj=cls, id='models.E025', ) ) return errors @classmethod def _check_single_primary_key(cls): errors = [] if sum(1 for f in cls._meta.local_fields if f.primary_key) > 1: errors.append( checks.Error( "The model cannot have more than one field with " "'primary_key=True'.", obj=cls, id='models.E026', ) ) return errors @classmethod def _check_index_together(cls): """Check the value of "index_together" option.""" if not isinstance(cls._meta.index_together, (tuple, list)): return [ checks.Error( "'index_together' must be a list or tuple.", obj=cls, id='models.E008', ) ] elif any(not isinstance(fields, (tuple, list)) for fields in cls._meta.index_together): return [ checks.Error( "All 'index_together' elements must be lists or tuples.", obj=cls, id='models.E009', ) ] else: errors = [] for fields in cls._meta.index_together: errors.extend(cls._check_local_fields(fields, "index_together")) return errors @classmethod def _check_unique_together(cls): """Check the value of "unique_together" option.""" if not isinstance(cls._meta.unique_together, (tuple, list)): return [ checks.Error( "'unique_together' must be a list or tuple.", obj=cls, id='models.E010', ) ] elif any(not isinstance(fields, (tuple, list)) for fields in cls._meta.unique_together): return [ checks.Error( "All 'unique_together' elements must be lists or tuples.", obj=cls, id='models.E011', ) ] else: errors = [] for fields in cls._meta.unique_together: errors.extend(cls._check_local_fields(fields, "unique_together")) return errors @classmethod def _check_indexes(cls): """Check the fields and names of indexes.""" errors = [] for index in cls._meta.indexes: # Index name can't start with an underscore or a number, restricted # for cross-database compatibility with Oracle. if index.name[0] == '_' or index.name[0].isdigit(): errors.append( checks.Error( "The index name '%s' cannot start with an underscore " "or a number." % index.name, obj=cls, id='models.E033', ), ) if len(index.name) > index.max_name_length: errors.append( checks.Error( "The index name '%s' cannot be longer than %d " "characters." 
% (index.name, index.max_name_length), obj=cls, id='models.E034', ), ) fields = [field for index in cls._meta.indexes for field, _ in index.fields_orders] errors.extend(cls._check_local_fields(fields, 'indexes')) return errors @classmethod def _check_local_fields(cls, fields, option): from django.db import models # In order to avoid hitting the relation tree prematurely, we use our # own fields_map instead of using get_field() forward_fields_map = {} for field in cls._meta._get_fields(reverse=False): forward_fields_map[field.name] = field if hasattr(field, 'attname'): forward_fields_map[field.attname] = field errors = [] for field_name in fields: try: field = forward_fields_map[field_name] except KeyError: errors.append( checks.Error( "'%s' refers to the nonexistent field '%s'." % ( option, field_name, ), obj=cls, id='models.E012', ) ) else: if isinstance(field.remote_field, models.ManyToManyRel): errors.append( checks.Error( "'%s' refers to a ManyToManyField '%s', but " "ManyToManyFields are not permitted in '%s'." % ( option, field_name, option, ), obj=cls, id='models.E013', ) ) elif field not in cls._meta.local_fields: errors.append( checks.Error( "'%s' refers to field '%s' which is not local to model '%s'." % (option, field_name, cls._meta.object_name), hint="This issue may be caused by multi-table inheritance.", obj=cls, id='models.E016', ) ) return errors @classmethod def _check_ordering(cls): """ Check "ordering" option -- is it a list of strings and do all fields exist? """ if cls._meta._ordering_clash: return [ checks.Error( "'ordering' and 'order_with_respect_to' cannot be used together.", obj=cls, id='models.E021', ), ] if cls._meta.order_with_respect_to or not cls._meta.ordering: return [] if not isinstance(cls._meta.ordering, (list, tuple)): return [ checks.Error( "'ordering' must be a tuple or list (even if you want to order by only one field).", obj=cls, id='models.E014', ) ] errors = [] fields = cls._meta.ordering # Skip expressions and '?' fields. fields = (f for f in fields if isinstance(f, str) and f != '?') # Convert "-field" to "field". fields = ((f[1:] if f.startswith('-') else f) for f in fields) # Separate related fields and non-related fields. _fields = [] related_fields = [] for f in fields: if LOOKUP_SEP in f: related_fields.append(f) else: _fields.append(f) fields = _fields # Check related fields. for field in related_fields: _cls = cls fld = None for part in field.split(LOOKUP_SEP): try: fld = _cls._meta.get_field(part) if fld.is_relation: _cls = fld.get_path_info()[-1].to_opts.model except (FieldDoesNotExist, AttributeError): if fld is None or fld.get_transform(part) is None: errors.append( checks.Error( "'ordering' refers to the nonexistent field, " "related field, or lookup '%s'." % field, obj=cls, id='models.E015', ) ) # Skip ordering on pk. This is always a valid order_by field # but is an alias and therefore won't be found by opts.get_field. fields = {f for f in fields if f != 'pk'} # Check for invalid or nonexistent fields in ordering. invalid_fields = [] # Any field name that is not present in field_names does not exist. # Also, ordering by m2m fields is not allowed. 
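        # A name is orderable when it is a forward field (matched by name
        # or attname) or the related query name of an auto-created reverse
        # relation; anything else left in fields is reported as E015.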
opts = cls._meta valid_fields = set(chain.from_iterable( (f.name, f.attname) if not (f.auto_created and not f.concrete) else (f.field.related_query_name(),) for f in chain(opts.fields, opts.related_objects) )) invalid_fields.extend(fields - valid_fields) for invalid_field in invalid_fields: errors.append( checks.Error( "'ordering' refers to the nonexistent field, related " "field, or lookup '%s'." % invalid_field, obj=cls, id='models.E015', ) ) return errors @classmethod def _check_long_column_names(cls): """ Check that any auto-generated column names are shorter than the limits for each database in which the model will be created. """ errors = [] allowed_len = None db_alias = None # Find the minimum max allowed length among all specified db_aliases. for db in settings.DATABASES: # skip databases where the model won't be created if not router.allow_migrate_model(db, cls): continue connection = connections[db] max_name_length = connection.ops.max_name_length() if max_name_length is None or connection.features.truncates_names: continue else: if allowed_len is None: allowed_len = max_name_length db_alias = db elif max_name_length < allowed_len: allowed_len = max_name_length db_alias = db if allowed_len is None: return errors for f in cls._meta.local_fields: _, column_name = f.get_attname_column() # Check if auto-generated name for the field is too long # for the database. if f.db_column is None and column_name is not None and len(column_name) > allowed_len: errors.append( checks.Error( 'Autogenerated column name too long for field "%s". ' 'Maximum length is "%s" for database "%s".' % (column_name, allowed_len, db_alias), hint="Set the column name manually using 'db_column'.", obj=cls, id='models.E018', ) ) for f in cls._meta.local_many_to_many: # Skip nonexistent models. if isinstance(f.remote_field.through, str): continue # Check if auto-generated name for the M2M field is too long # for the database. for m2m in f.remote_field.through._meta.local_fields: _, rel_name = m2m.get_attname_column() if m2m.db_column is None and rel_name is not None and len(rel_name) > allowed_len: errors.append( checks.Error( 'Autogenerated column name too long for M2M field ' '"%s". Maximum length is "%s" for database "%s".' % (rel_name, allowed_len, db_alias), hint=( "Use 'through' to create a separate model for " "M2M and then set column_name using 'db_column'." ), obj=cls, id='models.E019', ) ) return errors @classmethod def _check_constraints(cls): errors = [] for db in settings.DATABASES: if not router.allow_migrate_model(db, cls): continue connection = connections[db] if connection.features.supports_table_check_constraints: continue if any(isinstance(constraint, CheckConstraint) for constraint in cls._meta.constraints): errors.append( checks.Warning( '%s does not support check constraints.' % connection.display_name, hint=( "A constraint won't be created. Silence this " "warning if you don't care about it." 
), obj=cls, id='models.W027', ) ) return errors ############################################ # HELPER FUNCTIONS (CURRIED MODEL METHODS) # ############################################ # ORDERING METHODS ######################### def method_set_order(self, ordered_obj, id_list, using=None): if using is None: using = DEFAULT_DB_ALIAS order_wrt = ordered_obj._meta.order_with_respect_to filter_args = order_wrt.get_forward_related_filter(self) ordered_obj.objects.db_manager(using).filter(**filter_args).bulk_update([ ordered_obj(pk=pk, _order=order) for order, pk in enumerate(id_list) ], ['_order']) def method_get_order(self, ordered_obj): order_wrt = ordered_obj._meta.order_with_respect_to filter_args = order_wrt.get_forward_related_filter(self) pk_name = ordered_obj._meta.pk.name return ordered_obj.objects.filter(**filter_args).values_list(pk_name, flat=True) def make_foreign_order_accessors(model, related_model): setattr( related_model, 'get_%s_order' % model.__name__.lower(), partialmethod(method_get_order, model) ) setattr( related_model, 'set_%s_order' % model.__name__.lower(), partialmethod(method_set_order, model) ) ######## # MISC # ######## def model_unpickle(model_id): """Used to unpickle Model subclasses with deferred fields.""" if isinstance(model_id, tuple): model = apps.get_model(*model_id) else: # Backwards compat - the model was cached directly in earlier versions. model = model_id return model.__new__(model) model_unpickle.__safe_for_unpickle__ = True
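

# A minimal usage sketch of the lifecycle implemented above -- illustration
# only, not executed as part of this module. ``Article`` and its fields are
# hypothetical and assume a configured Django project:
#
#     from django.db import models
#
#     class Article(models.Model):
#         title = models.CharField(max_length=100, unique=True)
#
#     a = Article(title='Hello')            # ModelBase-generated __init__
#     a.full_clean()                        # clean_fields(), clean(), validate_unique()
#     a.save()                              # save() -> save_base() -> _save_table()
#     a.refresh_from_db(fields=['title'])   # reload chosen fields from the db
#     a.delete()                            # rows collected and removed via Collector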
import collections.abc import copy import datetime import decimal import operator import uuid import warnings from base64 import b64decode, b64encode from functools import partialmethod, total_ordering from django import forms from django.apps import apps from django.conf import settings from django.core import checks, exceptions, validators # When the _meta object was formalized, this exception was moved to # django.core.exceptions. It is retained here for backwards compatibility # purposes. from django.core.exceptions import FieldDoesNotExist # NOQA from django.db import connection, connections, router from django.db.models.constants import LOOKUP_SEP from django.db.models.query_utils import DeferredAttribute, RegisterLookupMixin from django.utils import timezone from django.utils.datastructures import DictWrapper from django.utils.dateparse import ( parse_date, parse_datetime, parse_duration, parse_time, ) from django.utils.duration import duration_microseconds, duration_string from django.utils.functional import Promise, cached_property from django.utils.ipv6 import clean_ipv6_address from django.utils.itercompat import is_iterable from django.utils.text import capfirst from django.utils.translation import gettext_lazy as _ __all__ = [ 'AutoField', 'BLANK_CHOICE_DASH', 'BigAutoField', 'BigIntegerField', 'BinaryField', 'BooleanField', 'CharField', 'CommaSeparatedIntegerField', 'DateField', 'DateTimeField', 'DecimalField', 'DurationField', 'EmailField', 'Empty', 'Field', 'FieldDoesNotExist', 'FilePathField', 'FloatField', 'GenericIPAddressField', 'IPAddressField', 'IntegerField', 'NOT_PROVIDED', 'NullBooleanField', 'PositiveIntegerField', 'PositiveSmallIntegerField', 'SlugField', 'SmallIntegerField', 'TextField', 'TimeField', 'URLField', 'UUIDField', ] class Empty: pass class NOT_PROVIDED: pass # The values to use for "blank" in SelectFields. Will be appended to the start # of most "choices" lists. BLANK_CHOICE_DASH = [("", "---------")] def _load_field(app_label, model_name, field_name): return apps.get_model(app_label, model_name)._meta.get_field(field_name) # A guide to Field parameters: # # * name: The name of the field specified in the model. # * attname: The attribute to use on the model object. This is the same as # "name", except in the case of ForeignKeys, where "_id" is # appended. # * db_column: The db_column specified in the model (or None). # * column: The database column for this field. This is the same as # "attname", except if db_column is specified. # # Code that introspects values, or does other dynamic things, should use # attname. For example, this gets the primary key value of object "obj": # # getattr(obj, opts.pk.attname) def _empty(of_cls): new = Empty() new.__class__ = of_cls return new def return_None(): return None @total_ordering class Field(RegisterLookupMixin): """Base class for all field types""" # Designates whether empty strings fundamentally are allowed at the # database level. empty_strings_allowed = True empty_values = list(validators.EMPTY_VALUES) # These track each time a Field instance is created. Used to retain order. # The auto_creation_counter is used for fields that Django implicitly # creates, creation_counter is used for all user-specified fields. 
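    # Because auto-created counters are negative and user counters are not,
    # auto-created fields (such as an implicit ``id`` AutoField) sort ahead
    # of user-declared fields on the same model.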
creation_counter = 0 auto_creation_counter = -1 default_validators = [] # Default set of validators default_error_messages = { 'invalid_choice': _('Value %(value)r is not a valid choice.'), 'null': _('This field cannot be null.'), 'blank': _('This field cannot be blank.'), 'unique': _('%(model_name)s with this %(field_label)s ' 'already exists.'), # Translators: The 'lookup_type' is one of 'date', 'year' or 'month'. # Eg: "Title must be unique for pub_date year" 'unique_for_date': _("%(field_label)s must be unique for " "%(date_field_label)s %(lookup_type)s."), } system_check_deprecated_details = None system_check_removed_details = None # Field flags hidden = False many_to_many = None many_to_one = None one_to_many = None one_to_one = None related_model = None descriptor_class = DeferredAttribute # Generic field type description, usually overridden by subclasses def _description(self): return _('Field of type: %(field_type)s') % { 'field_type': self.__class__.__name__ } description = property(_description) def __init__(self, verbose_name=None, name=None, primary_key=False, max_length=None, unique=False, blank=False, null=False, db_index=False, rel=None, default=NOT_PROVIDED, editable=True, serialize=True, unique_for_date=None, unique_for_month=None, unique_for_year=None, choices=None, help_text='', db_column=None, db_tablespace=None, auto_created=False, validators=(), error_messages=None): self.name = name self.verbose_name = verbose_name # May be set by set_attributes_from_name self._verbose_name = verbose_name # Store original for deconstruction self.primary_key = primary_key self.max_length, self._unique = max_length, unique self.blank, self.null = blank, null self.remote_field = rel self.is_relation = self.remote_field is not None self.default = default self.editable = editable self.serialize = serialize self.unique_for_date = unique_for_date self.unique_for_month = unique_for_month self.unique_for_year = unique_for_year if isinstance(choices, collections.abc.Iterator): choices = list(choices) self.choices = choices self.help_text = help_text self.db_index = db_index self.db_column = db_column self._db_tablespace = db_tablespace self.auto_created = auto_created # Adjust the appropriate creation counter, and save our local copy. if auto_created: self.creation_counter = Field.auto_creation_counter Field.auto_creation_counter -= 1 else: self.creation_counter = Field.creation_counter Field.creation_counter += 1 self._validators = list(validators) # Store for deconstruction later messages = {} for c in reversed(self.__class__.__mro__): messages.update(getattr(c, 'default_error_messages', {})) messages.update(error_messages or {}) self._error_messages = error_messages # Store for deconstruction later self.error_messages = messages def __str__(self): """ Return "app_label.model_label.field_name" for fields attached to models. 
""" if not hasattr(self, 'model'): return super().__str__() model = self.model app = model._meta.app_label return '%s.%s.%s' % (app, model._meta.object_name, self.name) def __repr__(self): """Display the module, class, and name of the field.""" path = '%s.%s' % (self.__class__.__module__, self.__class__.__qualname__) name = getattr(self, 'name', None) if name is not None: return '<%s: %s>' % (path, name) return '<%s>' % path def check(self, **kwargs): return [ *self._check_field_name(), *self._check_choices(), *self._check_db_index(), *self._check_null_allowed_for_primary_keys(), *self._check_backend_specific_checks(**kwargs), *self._check_validators(), *self._check_deprecation_details(), ] def _check_field_name(self): """ Check if field name is valid, i.e. 1) does not end with an underscore, 2) does not contain "__" and 3) is not "pk". """ if self.name.endswith('_'): return [ checks.Error( 'Field names must not end with an underscore.', obj=self, id='fields.E001', ) ] elif LOOKUP_SEP in self.name: return [ checks.Error( 'Field names must not contain "%s".' % (LOOKUP_SEP,), obj=self, id='fields.E002', ) ] elif self.name == 'pk': return [ checks.Error( "'pk' is a reserved word that cannot be used as a field name.", obj=self, id='fields.E003', ) ] else: return [] def _check_choices(self): if not self.choices: return [] def is_value(value, accept_promise=True): return isinstance(value, (str, Promise) if accept_promise else str) or not is_iterable(value) if is_value(self.choices, accept_promise=False): return [ checks.Error( "'choices' must be an iterable (e.g., a list or tuple).", obj=self, id='fields.E004', ) ] # Expect [group_name, [value, display]] for choices_group in self.choices: try: group_name, group_choices = choices_group except (TypeError, ValueError): # Containing non-pairs break try: if not all( is_value(value) and is_value(human_name) for value, human_name in group_choices ): break except (TypeError, ValueError): # No groups, choices in the form [value, display] value, human_name = group_name, group_choices if not is_value(value) or not is_value(human_name): break # Special case: choices=['ab'] if isinstance(choices_group, str): break else: return [] return [ checks.Error( "'choices' must be an iterable containing " "(actual value, human readable name) tuples.", obj=self, id='fields.E005', ) ] def _check_db_index(self): if self.db_index not in (None, True, False): return [ checks.Error( "'db_index' must be None, True or False.", obj=self, id='fields.E006', ) ] else: return [] def _check_null_allowed_for_primary_keys(self): if (self.primary_key and self.null and not connection.features.interprets_empty_strings_as_nulls): # We cannot reliably check this for backends like Oracle which # consider NULL and '' to be equal (and thus set up # character-based fields a little differently). 
return [ checks.Error( 'Primary keys must not have null=True.', hint=('Set null=False on the field, or ' 'remove primary_key=True argument.'), obj=self, id='fields.E007', ) ] else: return [] def _check_backend_specific_checks(self, **kwargs): app_label = self.model._meta.app_label for db in connections: if router.allow_migrate(db, app_label, model_name=self.model._meta.model_name): return connections[db].validation.check_field(self, **kwargs) return [] def _check_validators(self): errors = [] for i, validator in enumerate(self.validators): if not callable(validator): errors.append( checks.Error( "All 'validators' must be callable.", hint=( "validators[{i}] ({repr}) isn't a function or " "instance of a validator class.".format( i=i, repr=repr(validator), ) ), obj=self, id='fields.E008', ) ) return errors def _check_deprecation_details(self): if self.system_check_removed_details is not None: return [ checks.Error( self.system_check_removed_details.get( 'msg', '%s has been removed except for support in historical ' 'migrations.' % self.__class__.__name__ ), hint=self.system_check_removed_details.get('hint'), obj=self, id=self.system_check_removed_details.get('id', 'fields.EXXX'), ) ] elif self.system_check_deprecated_details is not None: return [ checks.Warning( self.system_check_deprecated_details.get( 'msg', '%s has been deprecated.' % self.__class__.__name__ ), hint=self.system_check_deprecated_details.get('hint'), obj=self, id=self.system_check_deprecated_details.get('id', 'fields.WXXX'), ) ] return [] def get_col(self, alias, output_field=None): if output_field is None: output_field = self if alias != self.model._meta.db_table or output_field != self: from django.db.models.expressions import Col return Col(alias, self, output_field) else: return self.cached_col @cached_property def cached_col(self): from django.db.models.expressions import Col return Col(self.model._meta.db_table, self) def select_format(self, compiler, sql, params): """ Custom format for select clauses. For example, GIS columns need to be selected as AsText(table.col) on MySQL as the table.col data can't be used by Django. """ return sql, params def deconstruct(self): """ Return enough information to recreate the field as a 4-tuple: * The name of the field on the model, if contribute_to_class() has been run. * The import path of the field, including the class:e.g. django.db.models.IntegerField This should be the most portable version, so less specific may be better. * A list of positional arguments. * A dict of keyword arguments. Note that the positional or keyword arguments must contain values of the following types (including inner values of collection types): * None, bool, str, int, float, complex, set, frozenset, list, tuple, dict * UUID * datetime.datetime (naive), datetime.date * top-level classes, top-level functions - will be referenced by their full import path * Storage instances - these have their own deconstruct() method This is because the values here must be serialized into a text format (possibly new Python code, possibly JSON) and these are the only types with encoding handlers defined. There's no need to return the exact way the field was instantiated this time, just ensure that the resulting field is the same - prefer keyword arguments over positional ones, and omit parameters with their default values. 
""" # Short-form way of fetching all the default parameters keywords = {} possibles = { "verbose_name": None, "primary_key": False, "max_length": None, "unique": False, "blank": False, "null": False, "db_index": False, "default": NOT_PROVIDED, "editable": True, "serialize": True, "unique_for_date": None, "unique_for_month": None, "unique_for_year": None, "choices": None, "help_text": '', "db_column": None, "db_tablespace": None, "auto_created": False, "validators": [], "error_messages": None, } attr_overrides = { "unique": "_unique", "error_messages": "_error_messages", "validators": "_validators", "verbose_name": "_verbose_name", "db_tablespace": "_db_tablespace", } equals_comparison = {"choices", "validators"} for name, default in possibles.items(): value = getattr(self, attr_overrides.get(name, name)) # Unroll anything iterable for choices into a concrete list if name == "choices" and isinstance(value, collections.abc.Iterable): value = list(value) # Do correct kind of comparison if name in equals_comparison: if value != default: keywords[name] = value else: if value is not default: keywords[name] = value # Work out path - we shorten it for known Django core fields path = "%s.%s" % (self.__class__.__module__, self.__class__.__qualname__) if path.startswith("django.db.models.fields.related"): path = path.replace("django.db.models.fields.related", "django.db.models") if path.startswith("django.db.models.fields.files"): path = path.replace("django.db.models.fields.files", "django.db.models") if path.startswith("django.db.models.fields.proxy"): path = path.replace("django.db.models.fields.proxy", "django.db.models") if path.startswith("django.db.models.fields"): path = path.replace("django.db.models.fields", "django.db.models") # Return basic info - other fields should override this. return (self.name, path, [], keywords) def clone(self): """ Uses deconstruct() to clone a new copy of this Field. Will not preserve any class attachments/attribute names. """ name, path, args, kwargs = self.deconstruct() return self.__class__(*args, **kwargs) def __eq__(self, other): # Needed for @total_ordering if isinstance(other, Field): return self.creation_counter == other.creation_counter return NotImplemented def __lt__(self, other): # This is needed because bisect does not take a comparison function. if isinstance(other, Field): return self.creation_counter < other.creation_counter return NotImplemented def __hash__(self): return hash(self.creation_counter) def __deepcopy__(self, memodict): # We don't have to deepcopy very much here, since most things are not # intended to be altered after initial creation. obj = copy.copy(self) if self.remote_field: obj.remote_field = copy.copy(self.remote_field) if hasattr(self.remote_field, 'field') and self.remote_field.field is self: obj.remote_field.field = obj memodict[id(self)] = obj return obj def __copy__(self): # We need to avoid hitting __reduce__, so define this # slightly weird copy construct. obj = Empty() obj.__class__ = self.__class__ obj.__dict__ = self.__dict__.copy() return obj def __reduce__(self): """ Pickling should return the model._meta.fields instance of the field, not a new copy of that field. So, use the app registry to load the model and then the field back. """ if not hasattr(self, 'model'): # Fields are sometimes used without attaching them to models (for # example in aggregation). In this case give back a plain field # instance. 
The code below will create a new empty instance of # class self.__class__, then update its dict with self.__dict__ # values - so, this is very close to normal pickle. state = self.__dict__.copy() # The _get_default cached_property can't be pickled due to lambda # usage. state.pop('_get_default', None) return _empty, (self.__class__,), state return _load_field, (self.model._meta.app_label, self.model._meta.object_name, self.name) def get_pk_value_on_save(self, instance): """ Hook to generate new PK values on save. This method is called when saving instances with no primary key value set. If this method returns something else than None, then the returned value is used when saving the new instance. """ if self.default: return self.get_default() return None def to_python(self, value): """ Convert the input value into the expected Python data type, raising django.core.exceptions.ValidationError if the data can't be converted. Return the converted value. Subclasses should override this. """ return value @cached_property def validators(self): """ Some validators can't be created at field initialization time. This method provides a way to delay their creation until required. """ return [*self.default_validators, *self._validators] def run_validators(self, value): if value in self.empty_values: return errors = [] for v in self.validators: try: v(value) except exceptions.ValidationError as e: if hasattr(e, 'code') and e.code in self.error_messages: e.message = self.error_messages[e.code] errors.extend(e.error_list) if errors: raise exceptions.ValidationError(errors) def validate(self, value, model_instance): """ Validate value and raise ValidationError if necessary. Subclasses should override this to provide validation logic. """ if not self.editable: # Skip validation for non-editable fields. return if self.choices is not None and value not in self.empty_values: for option_key, option_value in self.choices: if isinstance(option_value, (list, tuple)): # This is an optgroup, so look inside the group for # options. for optgroup_key, optgroup_value in option_value: if value == optgroup_key: return elif value == option_key: return raise exceptions.ValidationError( self.error_messages['invalid_choice'], code='invalid_choice', params={'value': value}, ) if value is None and not self.null: raise exceptions.ValidationError(self.error_messages['null'], code='null') if not self.blank and value in self.empty_values: raise exceptions.ValidationError(self.error_messages['blank'], code='blank') def clean(self, value, model_instance): """ Convert the value's type and run validation. Validation errors from to_python() and validate() are propagated. Return the correct value if no error is raised. """ value = self.to_python(value) self.validate(value, model_instance) self.run_validators(value) return value def db_type_parameters(self, connection): return DictWrapper(self.__dict__, connection.ops.quote_name, 'qn_') def db_check(self, connection): """ Return the database column check constraint for this field, for the provided connection. Works the same way as db_type() for the case that get_internal_type() does not map to a preexisting model field. """ data = self.db_type_parameters(connection) try: return connection.data_type_check_constraints[self.get_internal_type()] % data except KeyError: return None def db_type(self, connection): """ Return the database column data type for this field, for the provided connection. 
""" # The default implementation of this method looks at the # backend-specific data_types dictionary, looking up the field by its # "internal type". # # A Field class can implement the get_internal_type() method to specify # which *preexisting* Django Field class it's most similar to -- i.e., # a custom field might be represented by a TEXT column type, which is # the same as the TextField Django field type, which means the custom # field's get_internal_type() returns 'TextField'. # # But the limitation of the get_internal_type() / data_types approach # is that it cannot handle database column types that aren't already # mapped to one of the built-in Django field types. In this case, you # can implement db_type() instead of get_internal_type() to specify # exactly which wacky database column type you want to use. data = self.db_type_parameters(connection) try: return connection.data_types[self.get_internal_type()] % data except KeyError: return None def rel_db_type(self, connection): """ Return the data type that a related field pointing to this field should use. For example, this method is called by ForeignKey and OneToOneField to determine its data type. """ return self.db_type(connection) def cast_db_type(self, connection): """Return the data type to use in the Cast() function.""" db_type = connection.ops.cast_data_types.get(self.get_internal_type()) if db_type: return db_type % self.db_type_parameters(connection) return self.db_type(connection) def db_parameters(self, connection): """ Extension of db_type(), providing a range of different return values (type, checks). This will look at db_type(), allowing custom model fields to override it. """ type_string = self.db_type(connection) check_string = self.db_check(connection) return { "type": type_string, "check": check_string, } def db_type_suffix(self, connection): return connection.data_types_suffix.get(self.get_internal_type()) def get_db_converters(self, connection): if hasattr(self, 'from_db_value'): return [self.from_db_value] return [] @property def unique(self): return self._unique or self.primary_key @property def db_tablespace(self): return self._db_tablespace or settings.DEFAULT_INDEX_TABLESPACE def set_attributes_from_name(self, name): self.name = self.name or name self.attname, self.column = self.get_attname_column() self.concrete = self.column is not None if self.verbose_name is None and self.name: self.verbose_name = self.name.replace('_', ' ') def contribute_to_class(self, cls, name, private_only=False): """ Register the field with the model class it belongs to. If private_only is True, create a separate instance of this field for every subclass of cls, even if cls is not an abstract model. """ self.set_attributes_from_name(name) self.model = cls cls._meta.add_field(self, private=private_only) if self.column: # Don't override classmethods with the descriptor. This means that # if you have a classmethod and a field with the same name, then # such fields can't be deferred (we don't have a check for this). if not getattr(cls, self.attname, None): setattr(cls, self.attname, self.descriptor_class(self)) if self.choices is not None: setattr(cls, 'get_%s_display' % self.name, partialmethod(cls._get_FIELD_display, field=self)) def get_filter_kwargs_for_object(self, obj): """ Return a dict that when passed as kwargs to self.model.filter(), would yield all instances having the same value for this field as obj has. 
""" return {self.name: getattr(obj, self.attname)} def get_attname(self): return self.name def get_attname_column(self): attname = self.get_attname() column = self.db_column or attname return attname, column def get_internal_type(self): return self.__class__.__name__ def pre_save(self, model_instance, add): """Return field's value just before saving.""" return getattr(model_instance, self.attname) def get_prep_value(self, value): """Perform preliminary non-db specific value checks and conversions.""" if isinstance(value, Promise): value = value._proxy____cast() return value def get_db_prep_value(self, value, connection, prepared=False): """ Return field's value prepared for interacting with the database backend. Used by the default implementations of get_db_prep_save(). """ if not prepared: value = self.get_prep_value(value) return value def get_db_prep_save(self, value, connection): """Return field's value prepared for saving into a database.""" return self.get_db_prep_value(value, connection=connection, prepared=False) def has_default(self): """Return a boolean of whether this field has a default value.""" return self.default is not NOT_PROVIDED def get_default(self): """Return the default value for this field.""" return self._get_default() @cached_property def _get_default(self): if self.has_default(): if callable(self.default): return self.default return lambda: self.default if not self.empty_strings_allowed or self.null and not connection.features.interprets_empty_strings_as_nulls: return return_None return str # return empty string def get_choices(self, include_blank=True, blank_choice=BLANK_CHOICE_DASH, limit_choices_to=None, ordering=()): """ Return choices with a default blank choices included, for use as <select> choices for this field. """ if self.choices is not None: choices = list(self.choices) if include_blank: blank_defined = any(choice in ('', None) for choice, _ in self.flatchoices) if not blank_defined: choices = blank_choice + choices return choices rel_model = self.remote_field.model limit_choices_to = limit_choices_to or self.get_limit_choices_to() choice_func = operator.attrgetter( self.remote_field.get_related_field().attname if hasattr(self.remote_field, 'get_related_field') else 'pk' ) return (blank_choice if include_blank else []) + [ (choice_func(x), str(x)) for x in rel_model._default_manager.complex_filter(limit_choices_to).order_by(*ordering) ] def value_to_string(self, obj): """ Return a string value of this field from the passed obj. This is used by the serialization framework. """ return str(self.value_from_object(obj)) def _get_flatchoices(self): """Flattened version of choices tuple.""" if self.choices is None: return [] flat = [] for choice, value in self.choices: if isinstance(value, (list, tuple)): flat.extend(value) else: flat.append((choice, value)) return flat flatchoices = property(_get_flatchoices) def save_form_data(self, instance, data): setattr(instance, self.name, data) def formfield(self, form_class=None, choices_form_class=None, **kwargs): """Return a django.forms.Field instance for this field.""" defaults = { 'required': not self.blank, 'label': capfirst(self.verbose_name), 'help_text': self.help_text, } if self.has_default(): if callable(self.default): defaults['initial'] = self.default defaults['show_hidden_initial'] = True else: defaults['initial'] = self.get_default() if self.choices is not None: # Fields with choices get special treatment. 
include_blank = (self.blank or not (self.has_default() or 'initial' in kwargs)) defaults['choices'] = self.get_choices(include_blank=include_blank) defaults['coerce'] = self.to_python if self.null: defaults['empty_value'] = None if choices_form_class is not None: form_class = choices_form_class else: form_class = forms.TypedChoiceField # Many of the subclass-specific formfield arguments (min_value, # max_value) don't apply for choice fields, so be sure to only pass # the values that TypedChoiceField will understand. for k in list(kwargs): if k not in ('coerce', 'empty_value', 'choices', 'required', 'widget', 'label', 'initial', 'help_text', 'error_messages', 'show_hidden_initial', 'disabled'): del kwargs[k] defaults.update(kwargs) if form_class is None: form_class = forms.CharField return form_class(**defaults) def value_from_object(self, obj): """Return the value of this field in the given model instance.""" return getattr(obj, self.attname) class AutoField(Field): description = _("Integer") empty_strings_allowed = False default_error_messages = { 'invalid': _('“%(value)s” value must be an integer.'), } def __init__(self, *args, **kwargs): kwargs['blank'] = True super().__init__(*args, **kwargs) def check(self, **kwargs): return [ *super().check(**kwargs), *self._check_primary_key(), ] def _check_primary_key(self): if not self.primary_key: return [ checks.Error( 'AutoFields must set primary_key=True.', obj=self, id='fields.E100', ), ] else: return [] def deconstruct(self): name, path, args, kwargs = super().deconstruct() del kwargs['blank'] kwargs['primary_key'] = True return name, path, args, kwargs def get_internal_type(self): return "AutoField" def to_python(self, value): if value is None: return value try: return int(value) except (TypeError, ValueError): raise exceptions.ValidationError( self.error_messages['invalid'], code='invalid', params={'value': value}, ) def rel_db_type(self, connection): return IntegerField().db_type(connection=connection) def validate(self, value, model_instance): pass def get_db_prep_value(self, value, connection, prepared=False): if not prepared: value = self.get_prep_value(value) value = connection.ops.validate_autopk_value(value) return value def get_prep_value(self, value): from django.db.models.expressions import OuterRef value = super().get_prep_value(value) if value is None or isinstance(value, OuterRef): return value return int(value) def contribute_to_class(self, cls, name, **kwargs): assert not cls._meta.auto_field, "Model %s can't have more than one AutoField." % cls._meta.label super().contribute_to_class(cls, name, **kwargs) cls._meta.auto_field = self def formfield(self, **kwargs): return None class BigAutoField(AutoField): description = _("Big (8 byte) integer") def get_internal_type(self): return "BigAutoField" def rel_db_type(self, connection): return BigIntegerField().db_type(connection=connection) class BooleanField(Field): empty_strings_allowed = False default_error_messages = { 'invalid': _('“%(value)s” value must be either True or False.'), 'invalid_nullable': _('“%(value)s” value must be either True, False, or None.'), } description = _("Boolean (Either True or False)") def get_internal_type(self): return "BooleanField" def to_python(self, value): if self.null and value in self.empty_values: return None if value in (True, False): # 1/0 are equal to True/False. bool() converts former to latter. 
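# Illustrative examples (not an exhaustive list): to_python(1) returns True # and to_python(0) returns False here, while to_python('False') is handled # by the string checks below.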
return bool(value) if value in ('t', 'True', '1'): return True if value in ('f', 'False', '0'): return False raise exceptions.ValidationError( self.error_messages['invalid_nullable' if self.null else 'invalid'], code='invalid', params={'value': value}, ) def get_prep_value(self, value): value = super().get_prep_value(value) if value is None: return None return self.to_python(value) def formfield(self, **kwargs): if self.choices is not None: include_blank = not (self.has_default() or 'initial' in kwargs) defaults = {'choices': self.get_choices(include_blank=include_blank)} else: form_class = forms.NullBooleanField if self.null else forms.BooleanField # In HTML checkboxes, 'required' means "must be checked" which is # different from the choices case ("must select some value"). # required=False allows unchecked checkboxes. defaults = {'form_class': form_class, 'required': False} return super().formfield(**{**defaults, **kwargs}) class CharField(Field): description = _("String (up to %(max_length)s)") def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.validators.append(validators.MaxLengthValidator(self.max_length)) def check(self, **kwargs): return [ *super().check(**kwargs), *self._check_max_length_attribute(**kwargs), ] def _check_max_length_attribute(self, **kwargs): if self.max_length is None: return [ checks.Error( "CharFields must define a 'max_length' attribute.", obj=self, id='fields.E120', ) ] elif (not isinstance(self.max_length, int) or isinstance(self.max_length, bool) or self.max_length <= 0): return [ checks.Error( "'max_length' must be a positive integer.", obj=self, id='fields.E121', ) ] else: return [] def cast_db_type(self, connection): if self.max_length is None: return connection.ops.cast_char_field_without_max_length return super().cast_db_type(connection) def get_internal_type(self): return "CharField" def to_python(self, value): if isinstance(value, str) or value is None: return value return str(value) def get_prep_value(self, value): value = super().get_prep_value(value) return self.to_python(value) def formfield(self, **kwargs): # Passing max_length to forms.CharField means that the value's length # will be validated twice. This is considered acceptable since we want # the value in the form field (to pass into widget for example). defaults = {'max_length': self.max_length} # TODO: Handle multiple backends with different feature flags. if self.null and not connection.features.interprets_empty_strings_as_nulls: defaults['empty_value'] = None defaults.update(kwargs) return super().formfield(**defaults) class CommaSeparatedIntegerField(CharField): default_validators = [validators.validate_comma_separated_integer_list] description = _("Comma-separated integers") system_check_removed_details = { 'msg': ( 'CommaSeparatedIntegerField is removed except for support in ' 'historical migrations.' ), 'hint': ( 'Use CharField(validators=[validate_comma_separated_integer_list]) ' 'instead.' ), 'id': 'fields.E901', } class DateTimeCheckMixin: def check(self, **kwargs): return [ *super().check(**kwargs), *self._check_mutually_exclusive_options(), *self._check_fix_default_value(), ] def _check_mutually_exclusive_options(self): # auto_now, auto_now_add, and default are mutually exclusive # options. 
The use of more than one of these options together # will trigger an Error mutually_exclusive_options = [self.auto_now_add, self.auto_now, self.has_default()] enabled_options = [option not in (None, False) for option in mutually_exclusive_options].count(True) if enabled_options > 1: return [ checks.Error( "The options auto_now, auto_now_add, and default " "are mutually exclusive. Only one of these options " "may be present.", obj=self, id='fields.E160', ) ] else: return [] def _check_fix_default_value(self): return [] class DateField(DateTimeCheckMixin, Field): empty_strings_allowed = False default_error_messages = { 'invalid': _('“%(value)s” value has an invalid date format. It must be ' 'in YYYY-MM-DD format.'), 'invalid_date': _('“%(value)s” value has the correct format (YYYY-MM-DD) ' 'but it is an invalid date.'), } description = _("Date (without time)") def __init__(self, verbose_name=None, name=None, auto_now=False, auto_now_add=False, **kwargs): self.auto_now, self.auto_now_add = auto_now, auto_now_add if auto_now or auto_now_add: kwargs['editable'] = False kwargs['blank'] = True super().__init__(verbose_name, name, **kwargs) def _check_fix_default_value(self): """ Warn that using an actual date or datetime value is probably wrong; it's only evaluated on server startup. """ if not self.has_default(): return [] now = timezone.now() if not timezone.is_naive(now): now = timezone.make_naive(now, timezone.utc) value = self.default if isinstance(value, datetime.datetime): if not timezone.is_naive(value): value = timezone.make_naive(value, timezone.utc) value = value.date() elif isinstance(value, datetime.date): # Nothing to do, as dates don't have tz information pass else: # No explicit date / datetime value -- no checks necessary return [] offset = datetime.timedelta(days=1) lower = (now - offset).date() upper = (now + offset).date() if lower <= value <= upper: return [ checks.Warning( 'Fixed default value provided.', hint='It seems you set a fixed date / time / datetime ' 'value as default for this field. This may not be ' 'what you want. If you want to have the current date ' 'as default, use `django.utils.timezone.now`', obj=self, id='fields.W161', ) ] return [] def deconstruct(self): name, path, args, kwargs = super().deconstruct() if self.auto_now: kwargs['auto_now'] = True if self.auto_now_add: kwargs['auto_now_add'] = True if self.auto_now or self.auto_now_add: del kwargs['editable'] del kwargs['blank'] return name, path, args, kwargs def get_internal_type(self): return "DateField" def to_python(self, value): if value is None: return value if isinstance(value, datetime.datetime): if settings.USE_TZ and timezone.is_aware(value): # Convert aware datetimes to the default time zone # before casting them to dates (#17742). 
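# Illustrative example, assuming the default time zone is UTC: an aware # value 2019-01-01 03:00+05:00 becomes the naive datetime 2018-12-31 22:00, # so the stored date is 2018-12-31.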
default_timezone = timezone.get_default_timezone() value = timezone.make_naive(value, default_timezone) return value.date() if isinstance(value, datetime.date): return value try: parsed = parse_date(value) if parsed is not None: return parsed except ValueError: raise exceptions.ValidationError( self.error_messages['invalid_date'], code='invalid_date', params={'value': value}, ) raise exceptions.ValidationError( self.error_messages['invalid'], code='invalid', params={'value': value}, ) def pre_save(self, model_instance, add): if self.auto_now or (self.auto_now_add and add): value = datetime.date.today() setattr(model_instance, self.attname, value) return value else: return super().pre_save(model_instance, add) def contribute_to_class(self, cls, name, **kwargs): super().contribute_to_class(cls, name, **kwargs) if not self.null: setattr( cls, 'get_next_by_%s' % self.name, partialmethod(cls._get_next_or_previous_by_FIELD, field=self, is_next=True) ) setattr( cls, 'get_previous_by_%s' % self.name, partialmethod(cls._get_next_or_previous_by_FIELD, field=self, is_next=False) ) def get_prep_value(self, value): value = super().get_prep_value(value) return self.to_python(value) def get_db_prep_value(self, value, connection, prepared=False): # Casts dates into the format expected by the backend if not prepared: value = self.get_prep_value(value) return connection.ops.adapt_datefield_value(value) def value_to_string(self, obj): val = self.value_from_object(obj) return '' if val is None else val.isoformat() def formfield(self, **kwargs): return super().formfield(**{ 'form_class': forms.DateField, **kwargs, }) class DateTimeField(DateField): empty_strings_allowed = False default_error_messages = { 'invalid': _('“%(value)s” value has an invalid format. It must be in ' 'YYYY-MM-DD HH:MM[:ss[.uuuuuu]][TZ] format.'), 'invalid_date': _("“%(value)s” value has the correct format " "(YYYY-MM-DD) but it is an invalid date."), 'invalid_datetime': _('“%(value)s” value has the correct format ' '(YYYY-MM-DD HH:MM[:ss[.uuuuuu]][TZ]) ' 'but it is an invalid date/time.'), } description = _("Date (with time)") # __init__ is inherited from DateField def _check_fix_default_value(self): """ Warn that using an actual date or datetime value is probably wrong; it's only evaluated on server startup. """ if not self.has_default(): return [] now = timezone.now() if not timezone.is_naive(now): now = timezone.make_naive(now, timezone.utc) value = self.default if isinstance(value, datetime.datetime): second_offset = datetime.timedelta(seconds=10) lower = now - second_offset upper = now + second_offset if timezone.is_aware(value): value = timezone.make_naive(value, timezone.utc) elif isinstance(value, datetime.date): second_offset = datetime.timedelta(seconds=10) lower = now - second_offset lower = datetime.datetime(lower.year, lower.month, lower.day) upper = now + second_offset upper = datetime.datetime(upper.year, upper.month, upper.day) value = datetime.datetime(value.year, value.month, value.day) else: # No explicit date / datetime value -- no checks necessary return [] if lower <= value <= upper: return [ checks.Warning( 'Fixed default value provided.', hint='It seems you set a fixed date / time / datetime ' 'value as default for this field. This may not be ' 'what you want. 
If you want to have the current date ' 'as default, use `django.utils.timezone.now`', obj=self, id='fields.W161', ) ] return [] def get_internal_type(self): return "DateTimeField" def to_python(self, value): if value is None: return value if isinstance(value, datetime.datetime): return value if isinstance(value, datetime.date): value = datetime.datetime(value.year, value.month, value.day) if settings.USE_TZ: # For backwards compatibility, interpret naive datetimes in # local time. This won't work during DST change, but we can't # do much about it, so we let the exceptions percolate up the # call stack. warnings.warn("DateTimeField %s.%s received a naive datetime " "(%s) while time zone support is active." % (self.model.__name__, self.name, value), RuntimeWarning) default_timezone = timezone.get_default_timezone() value = timezone.make_aware(value, default_timezone) return value try: parsed = parse_datetime(value) if parsed is not None: return parsed except ValueError: raise exceptions.ValidationError( self.error_messages['invalid_datetime'], code='invalid_datetime', params={'value': value}, ) try: parsed = parse_date(value) if parsed is not None: return datetime.datetime(parsed.year, parsed.month, parsed.day) except ValueError: raise exceptions.ValidationError( self.error_messages['invalid_date'], code='invalid_date', params={'value': value}, ) raise exceptions.ValidationError( self.error_messages['invalid'], code='invalid', params={'value': value}, ) def pre_save(self, model_instance, add): if self.auto_now or (self.auto_now_add and add): value = timezone.now() setattr(model_instance, self.attname, value) return value else: return super().pre_save(model_instance, add) # contribute_to_class is inherited from DateField, it registers # get_next_by_FOO and get_prev_by_FOO def get_prep_value(self, value): value = super().get_prep_value(value) value = self.to_python(value) if value is not None and settings.USE_TZ and timezone.is_naive(value): # For backwards compatibility, interpret naive datetimes in local # time. This won't work during DST change, but we can't do much # about it, so we let the exceptions percolate up the call stack. try: name = '%s.%s' % (self.model.__name__, self.name) except AttributeError: name = '(unbound)' warnings.warn("DateTimeField %s received a naive datetime (%s)" " while time zone support is active." 
% (name, value), RuntimeWarning) default_timezone = timezone.get_default_timezone() value = timezone.make_aware(value, default_timezone) return value def get_db_prep_value(self, value, connection, prepared=False): # Casts datetimes into the format expected by the backend if not prepared: value = self.get_prep_value(value) return connection.ops.adapt_datetimefield_value(value) def value_to_string(self, obj): val = self.value_from_object(obj) return '' if val is None else val.isoformat() def formfield(self, **kwargs): return super().formfield(**{ 'form_class': forms.DateTimeField, **kwargs, }) class DecimalField(Field): empty_strings_allowed = False default_error_messages = { 'invalid': _('“%(value)s” value must be a decimal number.'), } description = _("Decimal number") def __init__(self, verbose_name=None, name=None, max_digits=None, decimal_places=None, **kwargs): self.max_digits, self.decimal_places = max_digits, decimal_places super().__init__(verbose_name, name, **kwargs) def check(self, **kwargs): errors = super().check(**kwargs) digits_errors = [ *self._check_decimal_places(), *self._check_max_digits(), ] if not digits_errors: errors.extend(self._check_decimal_places_and_max_digits(**kwargs)) else: errors.extend(digits_errors) return errors def _check_decimal_places(self): try: decimal_places = int(self.decimal_places) if decimal_places < 0: raise ValueError() except TypeError: return [ checks.Error( "DecimalFields must define a 'decimal_places' attribute.", obj=self, id='fields.E130', ) ] except ValueError: return [ checks.Error( "'decimal_places' must be a non-negative integer.", obj=self, id='fields.E131', ) ] else: return [] def _check_max_digits(self): try: max_digits = int(self.max_digits) if max_digits <= 0: raise ValueError() except TypeError: return [ checks.Error( "DecimalFields must define a 'max_digits' attribute.", obj=self, id='fields.E132', ) ] except ValueError: return [ checks.Error( "'max_digits' must be a positive integer.", obj=self, id='fields.E133', ) ] else: return [] def _check_decimal_places_and_max_digits(self, **kwargs): if int(self.decimal_places) > int(self.max_digits): return [ checks.Error( "'max_digits' must be greater or equal to 'decimal_places'.", obj=self, id='fields.E134', ) ] return [] @cached_property def validators(self): return super().validators + [ validators.DecimalValidator(self.max_digits, self.decimal_places) ] @cached_property def context(self): return decimal.Context(prec=self.max_digits) def deconstruct(self): name, path, args, kwargs = super().deconstruct() if self.max_digits is not None: kwargs['max_digits'] = self.max_digits if self.decimal_places is not None: kwargs['decimal_places'] = self.decimal_places return name, path, args, kwargs def get_internal_type(self): return "DecimalField" def to_python(self, value): if value is None: return value if isinstance(value, float): return self.context.create_decimal_from_float(value) try: return decimal.Decimal(value) except decimal.InvalidOperation: raise exceptions.ValidationError( self.error_messages['invalid'], code='invalid', params={'value': value}, ) def get_db_prep_save(self, value, connection): return connection.ops.adapt_decimalfield_value(self.to_python(value), self.max_digits, self.decimal_places) def get_prep_value(self, value): value = super().get_prep_value(value) return self.to_python(value) def formfield(self, **kwargs): return super().formfield(**{ 'max_digits': self.max_digits, 'decimal_places': self.decimal_places, 'form_class': forms.DecimalField, **kwargs, }) class 
DurationField(Field): """ Store timedelta objects. Use interval on PostgreSQL, INTERVAL DAY TO SECOND on Oracle, and bigint of microseconds on other databases. """ empty_strings_allowed = False default_error_messages = { 'invalid': _('“%(value)s” value has an invalid format. It must be in ' '[DD] [[HH:]MM:]ss[.uuuuuu] format.') } description = _("Duration") def get_internal_type(self): return "DurationField" def to_python(self, value): if value is None: return value if isinstance(value, datetime.timedelta): return value try: parsed = parse_duration(value) except ValueError: pass else: if parsed is not None: return parsed raise exceptions.ValidationError( self.error_messages['invalid'], code='invalid', params={'value': value}, ) def get_db_prep_value(self, value, connection, prepared=False): if connection.features.has_native_duration_field: return value if value is None: return None return duration_microseconds(value) def get_db_converters(self, connection): converters = [] if not connection.features.has_native_duration_field: converters.append(connection.ops.convert_durationfield_value) return converters + super().get_db_converters(connection) def value_to_string(self, obj): val = self.value_from_object(obj) return '' if val is None else duration_string(val) def formfield(self, **kwargs): return super().formfield(**{ 'form_class': forms.DurationField, **kwargs, }) class EmailField(CharField): default_validators = [validators.validate_email] description = _("Email address") def __init__(self, *args, **kwargs): # max_length=254 to be compliant with RFCs 3696 and 5321 kwargs.setdefault('max_length', 254) super().__init__(*args, **kwargs) def deconstruct(self): name, path, args, kwargs = super().deconstruct() # We do not exclude max_length if it matches default as we want to change # the default in future. return name, path, args, kwargs def formfield(self, **kwargs): # As with CharField, this will cause email validation to be performed # twice. 
return super().formfield(**{ 'form_class': forms.EmailField, **kwargs, }) class FilePathField(Field): description = _("File path") def __init__(self, verbose_name=None, name=None, path='', match=None, recursive=False, allow_files=True, allow_folders=False, **kwargs): self.path, self.match, self.recursive = path, match, recursive self.allow_files, self.allow_folders = allow_files, allow_folders kwargs.setdefault('max_length', 100) super().__init__(verbose_name, name, **kwargs) def check(self, **kwargs): return [ *super().check(**kwargs), *self._check_allowing_files_or_folders(**kwargs), ] def _check_allowing_files_or_folders(self, **kwargs): if not self.allow_files and not self.allow_folders: return [ checks.Error( "FilePathFields must have either 'allow_files' or 'allow_folders' set to True.", obj=self, id='fields.E140', ) ] return [] def deconstruct(self): name, path, args, kwargs = super().deconstruct() if self.path != '': kwargs['path'] = self.path if self.match is not None: kwargs['match'] = self.match if self.recursive is not False: kwargs['recursive'] = self.recursive if self.allow_files is not True: kwargs['allow_files'] = self.allow_files if self.allow_folders is not False: kwargs['allow_folders'] = self.allow_folders if kwargs.get("max_length") == 100: del kwargs["max_length"] return name, path, args, kwargs def get_prep_value(self, value): value = super().get_prep_value(value) if value is None: return None return str(value) def formfield(self, **kwargs): return super().formfield(**{ 'path': self.path() if callable(self.path) else self.path, 'match': self.match, 'recursive': self.recursive, 'form_class': forms.FilePathField, 'allow_files': self.allow_files, 'allow_folders': self.allow_folders, **kwargs, }) def get_internal_type(self): return "FilePathField" class FloatField(Field): empty_strings_allowed = False default_error_messages = { 'invalid': _('“%(value)s” value must be a float.'), } description = _("Floating point number") def get_prep_value(self, value): value = super().get_prep_value(value) if value is None: return None return float(value) def get_internal_type(self): return "FloatField" def to_python(self, value): if value is None: return value try: return float(value) except (TypeError, ValueError): raise exceptions.ValidationError( self.error_messages['invalid'], code='invalid', params={'value': value}, ) def formfield(self, **kwargs): return super().formfield(**{ 'form_class': forms.FloatField, **kwargs, }) class IntegerField(Field): empty_strings_allowed = False default_error_messages = { 'invalid': _('“%(value)s” value must be an integer.'), } description = _("Integer") def check(self, **kwargs): return [ *super().check(**kwargs), *self._check_max_length_warning(), ] def _check_max_length_warning(self): if self.max_length is not None: return [ checks.Warning( "'max_length' is ignored when used with %s." % self.__class__.__name__, hint="Remove 'max_length' from field", obj=self, id='fields.W122', ) ] return [] @cached_property def validators(self): # These validators can't be added at field initialization time since # they're based on values retrieved from `connection`. 
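# Illustrative example: on a backend whose integer_field_range() for # 'IntegerField' is (-2147483648, 2147483647), the code below appends # MinValueValidator(-2147483648) and MaxValueValidator(2147483647) unless # equally strict validators are already present.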
validators_ = super().validators internal_type = self.get_internal_type() min_value, max_value = connection.ops.integer_field_range(internal_type) if min_value is not None and not any( ( isinstance(validator, validators.MinValueValidator) and ( validator.limit_value() if callable(validator.limit_value) else validator.limit_value ) >= min_value ) for validator in validators_ ): validators_.append(validators.MinValueValidator(min_value)) if max_value is not None and not any( ( isinstance(validator, validators.MaxValueValidator) and ( validator.limit_value() if callable(validator.limit_value) else validator.limit_value ) <= max_value ) for validator in validators_ ): validators_.append(validators.MaxValueValidator(max_value)) return validators_ def get_prep_value(self, value): value = super().get_prep_value(value) if value is None: return None return int(value) def get_internal_type(self): return "IntegerField" def to_python(self, value): if value is None: return value try: return int(value) except (TypeError, ValueError): raise exceptions.ValidationError( self.error_messages['invalid'], code='invalid', params={'value': value}, ) def formfield(self, **kwargs): return super().formfield(**{ 'form_class': forms.IntegerField, **kwargs, }) class BigIntegerField(IntegerField): description = _("Big (8 byte) integer") MAX_BIGINT = 9223372036854775807 def get_internal_type(self): return "BigIntegerField" def formfield(self, **kwargs): return super().formfield(**{ 'min_value': -BigIntegerField.MAX_BIGINT - 1, 'max_value': BigIntegerField.MAX_BIGINT, **kwargs, }) class IPAddressField(Field): empty_strings_allowed = False description = _("IPv4 address") system_check_removed_details = { 'msg': ( 'IPAddressField has been removed except for support in ' 'historical migrations.' 
), 'hint': 'Use GenericIPAddressField instead.', 'id': 'fields.E900', } def __init__(self, *args, **kwargs): kwargs['max_length'] = 15 super().__init__(*args, **kwargs) def deconstruct(self): name, path, args, kwargs = super().deconstruct() del kwargs['max_length'] return name, path, args, kwargs def get_prep_value(self, value): value = super().get_prep_value(value) if value is None: return None return str(value) def get_internal_type(self): return "IPAddressField" class GenericIPAddressField(Field): empty_strings_allowed = False description = _("IP address") default_error_messages = {} def __init__(self, verbose_name=None, name=None, protocol='both', unpack_ipv4=False, *args, **kwargs): self.unpack_ipv4 = unpack_ipv4 self.protocol = protocol self.default_validators, invalid_error_message = \ validators.ip_address_validators(protocol, unpack_ipv4) self.default_error_messages['invalid'] = invalid_error_message kwargs['max_length'] = 39 super().__init__(verbose_name, name, *args, **kwargs) def check(self, **kwargs): return [ *super().check(**kwargs), *self._check_blank_and_null_values(**kwargs), ] def _check_blank_and_null_values(self, **kwargs): if not getattr(self, 'null', False) and getattr(self, 'blank', False): return [ checks.Error( 'GenericIPAddressFields cannot have blank=True if null=False, ' 'as blank values are stored as nulls.', obj=self, id='fields.E150', ) ] return [] def deconstruct(self): name, path, args, kwargs = super().deconstruct() if self.unpack_ipv4 is not False: kwargs['unpack_ipv4'] = self.unpack_ipv4 if self.protocol != "both": kwargs['protocol'] = self.protocol if kwargs.get("max_length") == 39: del kwargs['max_length'] return name, path, args, kwargs def get_internal_type(self): return "GenericIPAddressField" def to_python(self, value): if value is None: return None if not isinstance(value, str): value = str(value) value = value.strip() if ':' in value: return clean_ipv6_address(value, self.unpack_ipv4, self.error_messages['invalid']) return value def get_db_prep_value(self, value, connection, prepared=False): if not prepared: value = self.get_prep_value(value) return connection.ops.adapt_ipaddressfield_value(value) def get_prep_value(self, value): value = super().get_prep_value(value) if value is None: return None if value and ':' in value: try: return clean_ipv6_address(value, self.unpack_ipv4) except exceptions.ValidationError: pass return str(value) def formfield(self, **kwargs): return super().formfield(**{ 'protocol': self.protocol, 'form_class': forms.GenericIPAddressField, **kwargs, }) class NullBooleanField(BooleanField): default_error_messages = { 'invalid': _('“%(value)s” value must be either None, True or False.'), 'invalid_nullable': _('“%(value)s” value must be either None, True or False.'), } description = _("Boolean (Either True, False or None)") def __init__(self, *args, **kwargs): kwargs['null'] = True kwargs['blank'] = True super().__init__(*args, **kwargs) def deconstruct(self): name, path, args, kwargs = super().deconstruct() del kwargs['null'] del kwargs['blank'] return name, path, args, kwargs def get_internal_type(self): return "NullBooleanField" class PositiveIntegerRelDbTypeMixin: def rel_db_type(self, connection): """ Return the data type that a related field pointing to this field should use. In most cases, a foreign key pointing to a positive integer primary key will have an integer column data type but some databases (e.g. MySQL) have an unsigned integer type. 
In that case (related_fields_match_type=True), the primary key should return its db_type. """ if connection.features.related_fields_match_type: return self.db_type(connection) else: return IntegerField().db_type(connection=connection) class PositiveIntegerField(PositiveIntegerRelDbTypeMixin, IntegerField): description = _("Positive integer") def get_internal_type(self): return "PositiveIntegerField" def formfield(self, **kwargs): return super().formfield(**{ 'min_value': 0, **kwargs, }) class PositiveSmallIntegerField(PositiveIntegerRelDbTypeMixin, IntegerField): description = _("Positive small integer") def get_internal_type(self): return "PositiveSmallIntegerField" def formfield(self, **kwargs): return super().formfield(**{ 'min_value': 0, **kwargs, }) class SlugField(CharField): default_validators = [validators.validate_slug] description = _("Slug (up to %(max_length)s)") def __init__(self, *args, max_length=50, db_index=True, allow_unicode=False, **kwargs): self.allow_unicode = allow_unicode if self.allow_unicode: self.default_validators = [validators.validate_unicode_slug] super().__init__(*args, max_length=max_length, db_index=db_index, **kwargs) def deconstruct(self): name, path, args, kwargs = super().deconstruct() if kwargs.get("max_length") == 50: del kwargs['max_length'] if self.db_index is False: kwargs['db_index'] = False else: del kwargs['db_index'] if self.allow_unicode is not False: kwargs['allow_unicode'] = self.allow_unicode return name, path, args, kwargs def get_internal_type(self): return "SlugField" def formfield(self, **kwargs): return super().formfield(**{ 'form_class': forms.SlugField, 'allow_unicode': self.allow_unicode, **kwargs, }) class SmallIntegerField(IntegerField): description = _("Small integer") def get_internal_type(self): return "SmallIntegerField" class TextField(Field): description = _("Text") def get_internal_type(self): return "TextField" def to_python(self, value): if isinstance(value, str) or value is None: return value return str(value) def get_prep_value(self, value): value = super().get_prep_value(value) return self.to_python(value) def formfield(self, **kwargs): # Passing max_length to forms.CharField means that the value's length # will be validated twice. This is considered acceptable since we want # the value in the form field (to pass into widget for example). return super().formfield(**{ 'max_length': self.max_length, **({} if self.choices is not None else {'widget': forms.Textarea}), **kwargs, }) class TimeField(DateTimeCheckMixin, Field): empty_strings_allowed = False default_error_messages = { 'invalid': _('“%(value)s” value has an invalid format. It must be in ' 'HH:MM[:ss[.uuuuuu]] format.'), 'invalid_time': _('“%(value)s” value has the correct format ' '(HH:MM[:ss[.uuuuuu]]) but it is an invalid time.'), } description = _("Time") def __init__(self, verbose_name=None, name=None, auto_now=False, auto_now_add=False, **kwargs): self.auto_now, self.auto_now_add = auto_now, auto_now_add if auto_now or auto_now_add: kwargs['editable'] = False kwargs['blank'] = True super().__init__(verbose_name, name, **kwargs) def _check_fix_default_value(self): """ Warn that using an actual date or datetime value is probably wrong; it's only evaluated on server startup. 
""" if not self.has_default(): return [] now = timezone.now() if not timezone.is_naive(now): now = timezone.make_naive(now, timezone.utc) value = self.default if isinstance(value, datetime.datetime): second_offset = datetime.timedelta(seconds=10) lower = now - second_offset upper = now + second_offset if timezone.is_aware(value): value = timezone.make_naive(value, timezone.utc) elif isinstance(value, datetime.time): second_offset = datetime.timedelta(seconds=10) lower = now - second_offset upper = now + second_offset value = datetime.datetime.combine(now.date(), value) if timezone.is_aware(value): value = timezone.make_naive(value, timezone.utc).time() else: # No explicit time / datetime value -- no checks necessary return [] if lower <= value <= upper: return [ checks.Warning( 'Fixed default value provided.', hint='It seems you set a fixed date / time / datetime ' 'value as default for this field. This may not be ' 'what you want. If you want to have the current date ' 'as default, use `django.utils.timezone.now`', obj=self, id='fields.W161', ) ] return [] def deconstruct(self): name, path, args, kwargs = super().deconstruct() if self.auto_now is not False: kwargs["auto_now"] = self.auto_now if self.auto_now_add is not False: kwargs["auto_now_add"] = self.auto_now_add if self.auto_now or self.auto_now_add: del kwargs['blank'] del kwargs['editable'] return name, path, args, kwargs def get_internal_type(self): return "TimeField" def to_python(self, value): if value is None: return None if isinstance(value, datetime.time): return value if isinstance(value, datetime.datetime): # Not usually a good idea to pass in a datetime here (it loses # information), but this can be a side-effect of interacting with a # database backend (e.g. Oracle), so we'll be accommodating. return value.time() try: parsed = parse_time(value) if parsed is not None: return parsed except ValueError: raise exceptions.ValidationError( self.error_messages['invalid_time'], code='invalid_time', params={'value': value}, ) raise exceptions.ValidationError( self.error_messages['invalid'], code='invalid', params={'value': value}, ) def pre_save(self, model_instance, add): if self.auto_now or (self.auto_now_add and add): value = datetime.datetime.now().time() setattr(model_instance, self.attname, value) return value else: return super().pre_save(model_instance, add) def get_prep_value(self, value): value = super().get_prep_value(value) return self.to_python(value) def get_db_prep_value(self, value, connection, prepared=False): # Casts times into the format expected by the backend if not prepared: value = self.get_prep_value(value) return connection.ops.adapt_timefield_value(value) def value_to_string(self, obj): val = self.value_from_object(obj) return '' if val is None else val.isoformat() def formfield(self, **kwargs): return super().formfield(**{ 'form_class': forms.TimeField, **kwargs, }) class URLField(CharField): default_validators = [validators.URLValidator()] description = _("URL") def __init__(self, verbose_name=None, name=None, **kwargs): kwargs.setdefault('max_length', 200) super().__init__(verbose_name, name, **kwargs) def deconstruct(self): name, path, args, kwargs = super().deconstruct() if kwargs.get("max_length") == 200: del kwargs['max_length'] return name, path, args, kwargs def formfield(self, **kwargs): # As with CharField, this will cause URL validation to be performed # twice. 
return super().formfield(**{ 'form_class': forms.URLField, **kwargs, }) class BinaryField(Field): description = _("Raw binary data") empty_values = [None, b''] def __init__(self, *args, **kwargs): kwargs.setdefault('editable', False) super().__init__(*args, **kwargs) if self.max_length is not None: self.validators.append(validators.MaxLengthValidator(self.max_length)) def check(self, **kwargs): return [*super().check(**kwargs), *self._check_str_default_value()] def _check_str_default_value(self): if self.has_default() and isinstance(self.default, str): return [ checks.Error( "BinaryField's default cannot be a string. Use bytes " "content instead.", obj=self, id='fields.E170', ) ] return [] def deconstruct(self): name, path, args, kwargs = super().deconstruct() if self.editable: kwargs['editable'] = True else: del kwargs['editable'] return name, path, args, kwargs def get_internal_type(self): return "BinaryField" def get_placeholder(self, value, compiler, connection): return connection.ops.binary_placeholder_sql(value) def get_default(self): if self.has_default() and not callable(self.default): return self.default default = super().get_default() if default == '': return b'' return default def get_db_prep_value(self, value, connection, prepared=False): value = super().get_db_prep_value(value, connection, prepared) if value is not None: return connection.Database.Binary(value) return value def value_to_string(self, obj): """Binary data is serialized as base64""" return b64encode(self.value_from_object(obj)).decode('ascii') def to_python(self, value): # If it's a string, it should be base64-encoded data if isinstance(value, str): return memoryview(b64decode(value.encode('ascii'))) return value class UUIDField(Field): default_error_messages = { 'invalid': _('“%(value)s” is not a valid UUID.'), } description = _('Universally unique identifier') empty_strings_allowed = False def __init__(self, verbose_name=None, **kwargs): kwargs['max_length'] = 32 super().__init__(verbose_name, **kwargs) def deconstruct(self): name, path, args, kwargs = super().deconstruct() del kwargs['max_length'] return name, path, args, kwargs def get_internal_type(self): return "UUIDField" def get_prep_value(self, value): value = super().get_prep_value(value) return self.to_python(value) def get_db_prep_value(self, value, connection, prepared=False): if value is None: return None if not isinstance(value, uuid.UUID): value = self.to_python(value) if connection.features.has_native_uuid_field: return value return value.hex def to_python(self, value): if value is not None and not isinstance(value, uuid.UUID): input_form = 'int' if isinstance(value, int) else 'hex' try: return uuid.UUID(**{input_form: value}) except (AttributeError, ValueError): raise exceptions.ValidationError( self.error_messages['invalid'], code='invalid', params={'value': value}, ) return value def formfield(self, **kwargs): return super().formfield(**{ 'form_class': forms.UUIDField, **kwargs, })
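# --- Hedged illustration, not part of Django's source: a minimal custom # field built on the Field API above. The name LowercaseHexField and its # defaults are hypothetical; the sketch makes the deconstruct() / # to_python() / get_prep_value() contract concrete by reusing CharField # behavior. class LowercaseHexField(CharField): """Store lowercase hexadecimal strings (illustrative sketch only).""" default_error_messages = { 'invalid_hex': _('“%(value)s” is not a valid hexadecimal string.'), } def __init__(self, *args, **kwargs): # Mirror EmailField/URLField: choose a default max_length that # deconstruct() strips again below. kwargs.setdefault('max_length', 64) super().__init__(*args, **kwargs) def deconstruct(self): name, path, args, kwargs = super().deconstruct() if kwargs.get('max_length') == 64: del kwargs['max_length'] return name, path, args, kwargs def to_python(self, value): value = super().to_python(value) if value is None: return value value = value.lower() if any(c not in '0123456789abcdef' for c in value): raise exceptions.ValidationError( self.error_messages['invalid_hex'], code='invalid_hex', params={'value': value}, ) return value def get_prep_value(self, value): value = super().get_prep_value(value) return value.lower() if isinstance(value, str) else value # Usage sketch: LowercaseHexField().clean('DEADBEEF', None) returns # 'deadbeef', and deconstruct() omits the hypothetical default max_length.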
""" Create SQL statements for QuerySets. The code in here encapsulates all of the SQL construction so that QuerySets themselves do not have to (and could be backed by things other than SQL databases). The abstraction barrier only works one way: this module has to know all about the internals of models in order to get the information it needs. """ import difflib import functools import inspect import sys import warnings from collections import Counter, namedtuple from collections.abc import Iterator, Mapping from itertools import chain, count, product from string import ascii_uppercase from django.core.exceptions import ( EmptyResultSet, FieldDoesNotExist, FieldError, ) from django.db import DEFAULT_DB_ALIAS, NotSupportedError, connections from django.db.models.aggregates import Count from django.db.models.constants import LOOKUP_SEP from django.db.models.expressions import ( BaseExpression, Col, F, OuterRef, Ref, SimpleCol, ) from django.db.models.fields import Field from django.db.models.fields.related_lookups import MultiColSource from django.db.models.lookups import Lookup from django.db.models.query_utils import ( Q, check_rel_lookup_compatibility, refs_expression, ) from django.db.models.sql.constants import ( INNER, LOUTER, ORDER_DIR, ORDER_PATTERN, SINGLE, ) from django.db.models.sql.datastructures import ( BaseTable, Empty, Join, MultiJoin, ) from django.db.models.sql.where import ( AND, OR, ExtraWhere, NothingNode, WhereNode, ) from django.utils.deprecation import RemovedInDjango40Warning from django.utils.functional import cached_property from django.utils.tree import Node __all__ = ['Query', 'RawQuery'] def get_field_names_from_opts(opts): return set(chain.from_iterable( (f.name, f.attname) if f.concrete else (f.name,) for f in opts.get_fields() )) def get_children_from_q(q): for child in q.children: if isinstance(child, Node): yield from get_children_from_q(child) else: yield child JoinInfo = namedtuple( 'JoinInfo', ('final_field', 'targets', 'opts', 'joins', 'path', 'transform_function') ) def _get_col(target, field, alias, simple_col): if simple_col: return SimpleCol(target, field) return target.get_col(alias, field) class RawQuery: """A single raw SQL query.""" def __init__(self, sql, using, params=None): self.params = params or () self.sql = sql self.using = using self.cursor = None # Mirror some properties of a normal query so that # the compiler can be used to process results. self.low_mark, self.high_mark = 0, None # Used for offset/limit self.extra_select = {} self.annotation_select = {} def chain(self, using): return self.clone(using) def clone(self, using): return RawQuery(self.sql, using, params=self.params) def get_columns(self): if self.cursor is None: self._execute_query() converter = connections[self.using].introspection.identifier_converter return [converter(column_meta[0]) for column_meta in self.cursor.description] def __iter__(self): # Always execute a new query for a new iterator. # This could be optimized with a cache at the expense of RAM. self._execute_query() if not connections[self.using].features.can_use_chunked_reads: # If the database can't use chunked reads we need to make sure we # evaluate the entire query up front. 
result = list(self.cursor) else: result = self.cursor return iter(result) def __repr__(self): return "<%s: %s>" % (self.__class__.__name__, self) @property def params_type(self): return dict if isinstance(self.params, Mapping) else tuple def __str__(self): return self.sql % self.params_type(self.params) def _execute_query(self): connection = connections[self.using] # Adapt parameters to the database, as much as possible considering # that the target type isn't known. See #17755. params_type = self.params_type adapter = connection.ops.adapt_unknown_value if params_type is tuple: params = tuple(adapter(val) for val in self.params) elif params_type is dict: params = {key: adapter(val) for key, val in self.params.items()} else: raise RuntimeError("Unexpected params type: %s" % params_type) self.cursor = connection.cursor() self.cursor.execute(self.sql, params) class Query(BaseExpression): """A single SQL query.""" alias_prefix = 'T' subq_aliases = frozenset([alias_prefix]) compiler = 'SQLCompiler' def __init__(self, model, where=WhereNode): self.model = model self.alias_refcount = {} # alias_map is the most important data structure regarding joins. # It's used for recording which joins exist in the query and what # types they are. The key is the alias of the joined table (possibly # the table name) and the value is a Join-like object (see # sql.datastructures.Join for more information). self.alias_map = {} # Sometimes the query contains references to aliases in outer queries (as # a result of split_exclude). Correct alias quoting needs to know these # aliases too. self.external_aliases = set() self.table_map = {} # Maps table names to list of aliases. self.default_cols = True self.default_ordering = True self.standard_ordering = True self.used_aliases = set() self.filter_is_sticky = False self.subquery = False # SQL-related attributes # Select and related select clauses are expressions to use in the # SELECT clause of the query. # The select is used for cases where we want to set up the select # clause to contain other than default fields (values(), subqueries...) # Note that annotations go to annotations dictionary. self.select = () self.where = where() self.where_class = where # The group_by attribute can have one of the following forms: # - None: no group by at all in the query # - A tuple of expressions: group by (at least) those expressions. # String refs are also allowed for now. # - True: group by all select fields of the model # See compiler.get_group_by() for details. self.group_by = None self.order_by = () self.low_mark, self.high_mark = 0, None # Used for offset/limit self.distinct = False self.distinct_fields = () self.select_for_update = False self.select_for_update_nowait = False self.select_for_update_skip_locked = False self.select_for_update_of = () self.select_related = False # Arbitrary limit for select_related to prevent infinite recursion. self.max_depth = 5 # Holds the selects defined by a call to values() or values_list() # excluding annotation_select and extra_select. self.values_select = () # SQL annotation-related attributes self.annotations = {} # Maps alias -> Annotation Expression self.annotation_select_mask = None self._annotation_select_cache = None # Set combination attributes self.combinator = None self.combinator_all = False self.combined_queries = () # These are for extensions. The contents are more or less appended # verbatim to the appropriate clause. self.extra = {} # Maps col_alias -> (col_sql, params).
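# Illustrative example: .extra(select={'is_recent': "pub_date > '2006-01-01'"}) # stores roughly {'is_recent': ("pub_date > '2006-01-01'", [])} here.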
self.extra_select_mask = None self._extra_select_cache = None self.extra_tables = () self.extra_order_by = () # A tuple holding a set of model field names and either True, if these # are the fields to defer, or False if these are the only fields to # load. self.deferred_loading = (frozenset(), True) self._filtered_relations = {} self.explain_query = False self.explain_format = None self.explain_options = {} @property def output_field(self): if len(self.select) == 1: return self.select[0].field elif len(self.annotation_select) == 1: return next(iter(self.annotation_select.values())).output_field @property def has_select_fields(self): return bool(self.select or self.annotation_select_mask or self.extra_select_mask) @cached_property def base_table(self): for alias in self.alias_map: return alias def __str__(self): """ Return the query as a string of SQL with the parameter values substituted in (use sql_with_params() to see the unsubstituted string). Parameter values won't necessarily be quoted correctly, since that is done by the database interface at execution time. """ sql, params = self.sql_with_params() return sql % params def sql_with_params(self): """ Return the query as an SQL string and the parameters that will be substituted into the query. """ return self.get_compiler(DEFAULT_DB_ALIAS).as_sql() def __deepcopy__(self, memo): """Limit the amount of work when a Query is deepcopied.""" result = self.clone() memo[id(self)] = result return result def get_compiler(self, using=None, connection=None): if using is None and connection is None: raise ValueError("Need either using or connection") if using: connection = connections[using] return connection.ops.compiler(self.compiler)(self, connection, using) def get_meta(self): """ Return the Options instance (the model._meta) from which to start processing. Normally, this is self.model._meta, but it can be changed by subclasses. """ return self.model._meta def clone(self): """ Return a copy of the current Query. A lightweight alternative to deepcopy(). """ obj = Empty() obj.__class__ = self.__class__ # Copy references to everything. obj.__dict__ = self.__dict__.copy() # Clone attributes that can't use shallow copy. obj.alias_refcount = self.alias_refcount.copy() obj.alias_map = self.alias_map.copy() obj.external_aliases = self.external_aliases.copy() obj.table_map = self.table_map.copy() obj.where = self.where.clone() obj.annotations = self.annotations.copy() if self.annotation_select_mask is None: obj.annotation_select_mask = None else: obj.annotation_select_mask = self.annotation_select_mask.copy() # _annotation_select_cache cannot be copied, as doing so breaks the # (necessary) state in which both annotations and # _annotation_select_cache point to the same underlying objects. # It will get re-populated in the cloned queryset the next time it's # used. obj._annotation_select_cache = None obj.extra = self.extra.copy() if self.extra_select_mask is None: obj.extra_select_mask = None else: obj.extra_select_mask = self.extra_select_mask.copy() if self._extra_select_cache is None: obj._extra_select_cache = None else: obj._extra_select_cache = self._extra_select_cache.copy() if 'subq_aliases' in self.__dict__: obj.subq_aliases = self.subq_aliases.copy() obj.used_aliases = self.used_aliases.copy() obj._filtered_relations = self._filtered_relations.copy() # Clear the cached_property try: del obj.base_table except AttributeError: pass return obj def chain(self, klass=None): """ Return a copy of the current Query that's ready for another operation.
The klass argument changes the type of the Query, e.g. UpdateQuery. """ obj = self.clone() if klass and obj.__class__ != klass: obj.__class__ = klass if not obj.filter_is_sticky: obj.used_aliases = set() obj.filter_is_sticky = False if hasattr(obj, '_setup_query'): obj._setup_query() return obj def relabeled_clone(self, change_map): clone = self.clone() clone.change_aliases(change_map) return clone def rewrite_cols(self, annotation, col_cnt): # We must make sure the inner query has the referred columns in it. # If we are aggregating over an annotation, then Django uses Ref() # instances to note this. However, if we are annotating over a column # of a related model, then it might be that column isn't part of the # SELECT clause of the inner query, and we must manually make sure # the column is selected. An example case is: # .aggregate(Sum('author__awards')) # Resolving this expression results in a join to author, but there # is no guarantee the awards column of author is in the select clause # of the query. Thus we must manually add the column to the inner # query. orig_exprs = annotation.get_source_expressions() new_exprs = [] for expr in orig_exprs: # FIXME: These conditions are fairly arbitrary. Identify a better # method of having expressions decide which code path they should # take. if isinstance(expr, Ref): # It's already a Ref to the subquery (see resolve_ref() for # details) new_exprs.append(expr) elif isinstance(expr, (WhereNode, Lookup)): # Decompose the subexpressions further. The code here is # copied from the else clause, but this condition must appear # before the contains_aggregate/is_summary condition below. new_expr, col_cnt = self.rewrite_cols(expr, col_cnt) new_exprs.append(new_expr) else: # Reuse aliases of expressions already selected in the subquery. for col_alias, selected_annotation in self.annotation_select.items(): if selected_annotation == expr: new_expr = Ref(col_alias, expr) break else: # An expression that is not selected in the subquery. if isinstance(expr, Col) or (expr.contains_aggregate and not expr.is_summary): # Reference column or another aggregate. Select it # under a non-conflicting alias. col_cnt += 1 col_alias = '__col%d' % col_cnt self.annotations[col_alias] = expr self.append_annotation_mask([col_alias]) new_expr = Ref(col_alias, expr) else: # Some other expression not referencing database values # directly. Its subexpression might contain Cols. new_expr, col_cnt = self.rewrite_cols(expr, col_cnt) new_exprs.append(new_expr) annotation.set_source_expressions(new_exprs) return annotation, col_cnt def get_aggregation(self, using, added_aggregate_names): """ Return the dictionary with the values of the existing aggregations. """ if not self.annotation_select: return {} existing_annotations = [ annotation for alias, annotation in self.annotations.items() if alias not in added_aggregate_names ] # Decide if we need to use a subquery. # # Existing annotations would cause incorrect results as get_aggregation() # must produce just one result and thus must not use GROUP BY. But we # aren't smart enough to remove the existing annotations from the # query, so those would force us to use GROUP BY. # # If the query has limit or distinct, or uses set operations, then # those operations must be done in a subquery so that the query # aggregates on the limit and/or distinct results instead of applying # the distinct and limit after the aggregation.
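# Illustrative example (model and field names are hypothetical): a sliced, # annotated query such as # Book.objects.annotate(n=Count('authors'))[:10].aggregate(total=Sum('n')) # must compute the SUM over the rows of the sliced subquery; aggregating # directly would discard the slice and return a different total.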
        if (isinstance(self.group_by, tuple) or self.is_sliced or existing_annotations or
                self.distinct or self.combinator):
            from django.db.models.sql.subqueries import AggregateQuery
            outer_query = AggregateQuery(self.model)
            inner_query = self.clone()
            inner_query.select_for_update = False
            inner_query.select_related = False
            inner_query.set_annotation_mask(self.annotation_select)
            if not self.is_sliced and not self.distinct_fields:
                # Queries with distinct_fields need ordering and when a limit
                # is applied we must take the slice from the ordered query.
                # Otherwise no need for ordering.
                inner_query.clear_ordering(True)
            if not inner_query.distinct:
                # If the inner query uses default select and it has some
                # aggregate annotations, then we must make sure the inner
                # query is grouped by the main model's primary key. However,
                # clearing the select clause can alter results if distinct is
                # used.
                has_existing_aggregate_annotations = any(
                    annotation for annotation in existing_annotations
                    if getattr(annotation, 'contains_aggregate', True)
                )
                if inner_query.default_cols and has_existing_aggregate_annotations:
                    inner_query.group_by = (self.model._meta.pk.get_col(inner_query.get_initial_alias()),)
                inner_query.default_cols = False

            relabels = {t: 'subquery' for t in inner_query.alias_map}
            relabels[None] = 'subquery'
            # Remove any aggregates marked for reduction from the subquery
            # and move them to the outer AggregateQuery.
            col_cnt = 0
            for alias, expression in list(inner_query.annotation_select.items()):
                annotation_select_mask = inner_query.annotation_select_mask
                if expression.is_summary:
                    expression, col_cnt = inner_query.rewrite_cols(expression, col_cnt)
                    outer_query.annotations[alias] = expression.relabeled_clone(relabels)
                    del inner_query.annotations[alias]
                    annotation_select_mask.remove(alias)
                # Make sure the annotation_select won't use cached results.
                inner_query.set_annotation_mask(inner_query.annotation_select_mask)
            if inner_query.select == () and not inner_query.default_cols and not inner_query.annotation_select_mask:
                # In case of Model.objects[0:3].count(), there would be no
                # field selected in the inner query, yet we must use a subquery.
                # So, make sure at least one field is selected.
                inner_query.select = (self.model._meta.pk.get_col(inner_query.get_initial_alias()),)
            try:
                outer_query.add_subquery(inner_query, using)
            except EmptyResultSet:
                return {
                    alias: None
                    for alias in outer_query.annotation_select
                }
        else:
            outer_query = self
            self.select = ()
            self.default_cols = False
            self.extra = {}

        outer_query.clear_ordering(True)
        outer_query.clear_limits()
        outer_query.select_for_update = False
        outer_query.select_related = False
        compiler = outer_query.get_compiler(using)
        result = compiler.execute_sql(SINGLE)
        if result is None:
            result = [None] * len(outer_query.annotation_select)

        converters = compiler.get_converters(outer_query.annotation_select.values())
        result = next(compiler.apply_converters((result,), converters))

        return dict(zip(outer_query.annotation_select, result))

    def get_count(self, using):
        """
        Perform a COUNT() query using the current filter constraints.
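
        A rough sketch of how this is reached, assuming a hypothetical
        ``Book`` model::

            query = Book.objects.filter(published=True).query
            query.get_count(using='default')  # runs SELECT COUNT(*) ...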
""" obj = self.clone() obj.add_annotation(Count('*'), alias='__count', is_summary=True) number = obj.get_aggregation(using, ['__count'])['__count'] if number is None: number = 0 return number def has_filters(self): return self.where def has_results(self, using): q = self.clone() if not q.distinct: if q.group_by is True: q.add_fields((f.attname for f in self.model._meta.concrete_fields), False) q.set_group_by() q.clear_select_clause() q.clear_ordering(True) q.set_limits(high=1) compiler = q.get_compiler(using=using) return compiler.has_results() def explain(self, using, format=None, **options): q = self.clone() q.explain_query = True q.explain_format = format q.explain_options = options compiler = q.get_compiler(using=using) return '\n'.join(compiler.explain_query()) def combine(self, rhs, connector): """ Merge the 'rhs' query into the current one (with any 'rhs' effects being applied *after* (that is, "to the right of") anything in the current query. 'rhs' is not modified during a call to this function. The 'connector' parameter describes how to connect filters from the 'rhs' query. """ assert self.model == rhs.model, \ "Cannot combine queries on two different base models." assert not self.is_sliced, \ "Cannot combine queries once a slice has been taken." assert self.distinct == rhs.distinct, \ "Cannot combine a unique query with a non-unique query." assert self.distinct_fields == rhs.distinct_fields, \ "Cannot combine queries with different distinct fields." # Work out how to relabel the rhs aliases, if necessary. change_map = {} conjunction = (connector == AND) # Determine which existing joins can be reused. When combining the # query with AND we must recreate all joins for m2m filters. When # combining with OR we can reuse joins. The reason is that in AND # case a single row can't fulfill a condition like: # revrel__col=1 & revrel__col=2 # But, there might be two different related rows matching this # condition. In OR case a single True is enough, so single row is # enough, too. # # Note that we will be creating duplicate joins for non-m2m joins in # the AND case. The results will be correct but this creates too many # joins. This is something that could be fixed later on. reuse = set() if conjunction else set(self.alias_map) # Base table must be present in the query - this is the same # table on both sides. self.get_initial_alias() joinpromoter = JoinPromoter(connector, 2, False) joinpromoter.add_votes( j for j in self.alias_map if self.alias_map[j].join_type == INNER) rhs_votes = set() # Now, add the joins from rhs query into the new query (skipping base # table). rhs_tables = list(rhs.alias_map)[1:] for alias in rhs_tables: join = rhs.alias_map[alias] # If the left side of the join was already relabeled, use the # updated alias. join = join.relabeled_clone(change_map) new_alias = self.join(join, reuse=reuse) if join.join_type == INNER: rhs_votes.add(new_alias) # We can't reuse the same join again in the query. If we have two # distinct joins for the same connection in rhs query, then the # combined query must have two joins, too. reuse.discard(new_alias) if alias != new_alias: change_map[alias] = new_alias if not rhs.alias_refcount[alias]: # The alias was unused in the rhs query. Unref it so that it # will be unused in the new query, too. We have to add and # unref the alias so that join promotion has information of # the join type for the unused alias. 
                self.unref_alias(new_alias)
        joinpromoter.add_votes(rhs_votes)
        joinpromoter.update_join_types(self)

        # Now relabel a copy of the rhs where-clause and add it to the current
        # one.
        w = rhs.where.clone()
        w.relabel_aliases(change_map)
        self.where.add(w, connector)

        # Selection columns and extra extensions are those provided by 'rhs'.
        if rhs.select:
            self.set_select([col.relabeled_clone(change_map) for col in rhs.select])
        else:
            self.select = ()

        if connector == OR:
            # It would be nice to be able to handle this, but the queries don't
            # really make sense (or return consistent value sets). Not worth
            # the extra complexity when you can write a real query instead.
            if self.extra and rhs.extra:
                raise ValueError("When merging querysets using 'or', you cannot have extra(select=...) on both sides.")
        self.extra.update(rhs.extra)
        extra_select_mask = set()
        if self.extra_select_mask is not None:
            extra_select_mask.update(self.extra_select_mask)
        if rhs.extra_select_mask is not None:
            extra_select_mask.update(rhs.extra_select_mask)
        if extra_select_mask:
            self.set_extra_mask(extra_select_mask)
        self.extra_tables += rhs.extra_tables

        # Ordering uses the 'rhs' ordering, unless it has none, in which case
        # the current ordering is used.
        self.order_by = rhs.order_by or self.order_by
        self.extra_order_by = rhs.extra_order_by or self.extra_order_by

    def deferred_to_data(self, target, callback):
        """
        Convert the self.deferred_loading data structure to an alternate data
        structure, describing the fields that *will* be loaded. This is used
        to compute the columns to select from the database and also by the
        QuerySet class to work out which fields are being initialized on each
        model. Models that have all their fields included aren't mentioned in
        the result, only those that have field restrictions in place.

        The "target" parameter is the instance that is populated (in place).
        The "callback" is a function that is called whenever a (model, field)
        pair needs to be added to "target". It accepts three parameters:
        "target", and the model and list of fields being added for that model.
        """
        field_names, defer = self.deferred_loading
        if not field_names:
            return
        orig_opts = self.get_meta()
        seen = {}
        must_include = {orig_opts.concrete_model: {orig_opts.pk}}
        for field_name in field_names:
            parts = field_name.split(LOOKUP_SEP)
            cur_model = self.model._meta.concrete_model
            opts = orig_opts
            for name in parts[:-1]:
                old_model = cur_model
                if name in self._filtered_relations:
                    name = self._filtered_relations[name].relation_name
                source = opts.get_field(name)
                if is_reverse_o2o(source):
                    cur_model = source.related_model
                else:
                    cur_model = source.remote_field.model
                opts = cur_model._meta
                # Even if we're "just passing through" this model, we must add
                # both the current model's pk and the related reference field
                # (if it's not a reverse relation) to the things we select.
                if not is_reverse_o2o(source):
                    must_include[old_model].add(source)
                add_to_dict(must_include, cur_model, opts.pk)
            field = opts.get_field(parts[-1])
            is_reverse_object = field.auto_created and not field.concrete
            model = field.related_model if is_reverse_object else field.model
            model = model._meta.concrete_model
            if model == opts.model:
                model = cur_model
            if not is_reverse_o2o(field):
                add_to_dict(seen, model, field)

        if defer:
            # We need to load all fields for each model, except those that
            # appear in "seen" (for all models that appear in "seen"). The
            # only slight complexity here is handling fields that exist on
            # parent models.
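            # For example, a field deferred on a child model but declared on
            # its parent must be recorded against the parent's concrete model,
            # which is what the workset below is keyed on.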
            workset = {}
            for model, values in seen.items():
                for field in model._meta.local_fields:
                    if field not in values:
                        m = field.model._meta.concrete_model
                        add_to_dict(workset, m, field)
            for model, values in must_include.items():
                # If we haven't included a model in workset, we don't add the
                # corresponding must_include fields for that model, since an
                # empty set means "include all fields". That's why there's no
                # "else" branch here.
                if model in workset:
                    workset[model].update(values)
            for model, values in workset.items():
                callback(target, model, values)
        else:
            for model, values in must_include.items():
                if model in seen:
                    seen[model].update(values)
                else:
                    # As we've passed through this model, but not explicitly
                    # included any fields, we have to make sure it's mentioned
                    # so that only the "must include" fields are pulled in.
                    seen[model] = values
            # Now ensure that every model in the inheritance chain is mentioned
            # in the parent list. Again, it must be mentioned to ensure that
            # only "must include" fields are pulled in.
            for model in orig_opts.get_parent_list():
                seen.setdefault(model, set())
            for model, values in seen.items():
                callback(target, model, values)

    def table_alias(self, table_name, create=False, filtered_relation=None):
        """
        Return a table alias for the given table_name and whether this is a
        new alias or not.

        If 'create' is true, a new alias is always created. Otherwise, the
        most recently created alias for the table (if one exists) is reused.
        """
        alias_list = self.table_map.get(table_name)
        if not create and alias_list:
            alias = alias_list[0]
            self.alias_refcount[alias] += 1
            return alias, False

        # Create a new alias for this table.
        if alias_list:
            alias = '%s%d' % (self.alias_prefix, len(self.alias_map) + 1)
            alias_list.append(alias)
        else:
            # The first occurrence of a table uses the table name directly.
            alias = filtered_relation.alias if filtered_relation is not None else table_name
            self.table_map[table_name] = [alias]
        self.alias_refcount[alias] = 1
        return alias, True

    def ref_alias(self, alias):
        """Increases the reference count for this alias."""
        self.alias_refcount[alias] += 1

    def unref_alias(self, alias, amount=1):
        """Decreases the reference count for this alias."""
        self.alias_refcount[alias] -= amount

    def promote_joins(self, aliases):
        """
        Promote recursively the join type of given aliases and their children
        to an outer join. Only promote a join if it is nullable or the parent
        join is an outer join.

        The children promotion is done to avoid join chains that contain
        a LOUTER b INNER c. So, if we have currently a INNER b INNER c and
        a->b is promoted, then we must also promote b->c automatically, or
        otherwise the promotion of a->b doesn't actually change anything in
        the query results.
        """
        aliases = list(aliases)
        while aliases:
            alias = aliases.pop(0)
            if self.alias_map[alias].join_type is None:
                # This is the base table (first FROM entry) - this table
                # isn't really joined at all in the query, so we should not
                # alter its join type.
                continue
            # Only the first alias (skipped above) should have None join_type
            assert self.alias_map[alias].join_type is not None
            parent_alias = self.alias_map[alias].parent_alias
            parent_louter = parent_alias and self.alias_map[parent_alias].join_type == LOUTER
            already_louter = self.alias_map[alias].join_type == LOUTER
            if ((self.alias_map[alias].nullable or parent_louter) and
                    not already_louter):
                self.alias_map[alias] = self.alias_map[alias].promote()
                # Join type of 'alias' changed, so re-examine all aliases that
                # refer to this one.
                aliases.extend(
                    join for join in self.alias_map
                    if self.alias_map[join].parent_alias == alias and join not in aliases
                )

    def demote_joins(self, aliases):
        """
        Change join type from LOUTER to INNER for all joins in aliases.

        Similarly to promote_joins(), this method must ensure no join chains
        containing first an outer, then an inner join are generated. If we
        are demoting b->c join in chain a LOUTER b LOUTER c then we must
        demote a->b automatically, or otherwise the demotion of b->c doesn't
        actually change anything in the query results.
        """
        aliases = list(aliases)
        while aliases:
            alias = aliases.pop(0)
            if self.alias_map[alias].join_type == LOUTER:
                self.alias_map[alias] = self.alias_map[alias].demote()
                parent_alias = self.alias_map[alias].parent_alias
                if self.alias_map[parent_alias].join_type == INNER:
                    aliases.append(parent_alias)

    def reset_refcounts(self, to_counts):
        """
        Reset reference counts for aliases so that they match the value passed
        in `to_counts`.
        """
        for alias, cur_refcount in self.alias_refcount.copy().items():
            unref_amount = cur_refcount - to_counts.get(alias, 0)
            self.unref_alias(alias, unref_amount)

    def change_aliases(self, change_map):
        """
        Change the aliases in change_map (which maps old-alias -> new-alias),
        relabelling any references to them in select columns and the where
        clause.
        """
        assert set(change_map).isdisjoint(change_map.values())

        # 1. Update references in "select" (normal columns plus aliases),
        # "group by" and "where".
        self.where.relabel_aliases(change_map)
        if isinstance(self.group_by, tuple):
            self.group_by = tuple([col.relabeled_clone(change_map) for col in self.group_by])
        self.select = tuple([col.relabeled_clone(change_map) for col in self.select])
        self.annotations = self.annotations and {
            key: col.relabeled_clone(change_map) for key, col in self.annotations.items()
        }

        # 2. Rename the alias in the internal table/alias datastructures.
        for old_alias, new_alias in change_map.items():
            if old_alias not in self.alias_map:
                continue
            alias_data = self.alias_map[old_alias].relabeled_clone(change_map)
            self.alias_map[new_alias] = alias_data
            self.alias_refcount[new_alias] = self.alias_refcount[old_alias]
            del self.alias_refcount[old_alias]
            del self.alias_map[old_alias]

            table_aliases = self.table_map[alias_data.table_name]
            for pos, alias in enumerate(table_aliases):
                if alias == old_alias:
                    table_aliases[pos] = new_alias
                    break
        self.external_aliases = {change_map.get(alias, alias)
                                 for alias in self.external_aliases}

    def bump_prefix(self, outer_query):
        """
        Change the alias prefix to the next letter in the alphabet in a way
        that the outer query's aliases and this query's aliases will not
        conflict. Even tables that previously had no alias will get an alias
        after this call.
        """
        def prefix_gen():
            """
            Generate a sequence of characters in alphabetical order:
                -> 'A', 'B', 'C', ...

            When the alphabet is finished, the sequence will continue with the
            Cartesian product:
                -> 'AA', 'AB', 'AC', ...
            """
            alphabet = ascii_uppercase
            prefix = chr(ord(self.alias_prefix) + 1)
            yield prefix
            for n in count(1):
                seq = alphabet[alphabet.index(prefix):] if prefix else alphabet
                for s in product(seq, repeat=n):
                    yield ''.join(s)
                prefix = None

        if self.alias_prefix != outer_query.alias_prefix:
            # No clashes between self and outer query should be possible.
            return

        # Explicitly avoid infinite loop. The constant divider is based on how
        # much depth recursive subquery references add to the stack.
This value # might need to be adjusted when adding or removing function calls from # the code path in charge of performing these operations. local_recursion_limit = sys.getrecursionlimit() // 16 for pos, prefix in enumerate(prefix_gen()): if prefix not in self.subq_aliases: self.alias_prefix = prefix break if pos > local_recursion_limit: raise RecursionError( 'Maximum recursion depth exceeded: too many subqueries.' ) self.subq_aliases = self.subq_aliases.union([self.alias_prefix]) outer_query.subq_aliases = outer_query.subq_aliases.union(self.subq_aliases) self.change_aliases({ alias: '%s%d' % (self.alias_prefix, pos) for pos, alias in enumerate(self.alias_map) }) def get_initial_alias(self): """ Return the first alias for this query, after increasing its reference count. """ if self.alias_map: alias = self.base_table self.ref_alias(alias) else: alias = self.join(BaseTable(self.get_meta().db_table, None)) return alias def count_active_tables(self): """ Return the number of tables in this query with a non-zero reference count. After execution, the reference counts are zeroed, so tables added in compiler will not be seen by this method. """ return len([1 for count in self.alias_refcount.values() if count]) def join(self, join, reuse=None, reuse_with_filtered_relation=False): """ Return an alias for the 'join', either reusing an existing alias for that join or creating a new one. 'join' is either a sql.datastructures.BaseTable or Join. The 'reuse' parameter can be either None which means all joins are reusable, or it can be a set containing the aliases that can be reused. The 'reuse_with_filtered_relation' parameter is used when computing FilteredRelation instances. A join is always created as LOUTER if the lhs alias is LOUTER to make sure chains like t1 LOUTER t2 INNER t3 aren't generated. All new joins are created as LOUTER if the join is nullable. """ if reuse_with_filtered_relation and reuse: reuse_aliases = [ a for a, j in self.alias_map.items() if a in reuse and j.equals(join, with_filtered_relation=False) ] else: reuse_aliases = [ a for a, j in self.alias_map.items() if (reuse is None or a in reuse) and j == join ] if reuse_aliases: if join.table_alias in reuse_aliases: reuse_alias = join.table_alias else: # Reuse the most recent alias of the joined table # (a many-to-many relation may be joined multiple times). reuse_alias = reuse_aliases[-1] self.ref_alias(reuse_alias) return reuse_alias # No reuse is possible, so we need a new alias. alias, _ = self.table_alias(join.table_name, create=True, filtered_relation=join.filtered_relation) if join.join_type: if self.alias_map[join.parent_alias].join_type == LOUTER or join.nullable: join_type = LOUTER else: join_type = INNER join.join_type = join_type join.table_alias = alias self.alias_map[alias] = join return alias def join_parent_model(self, opts, model, alias, seen): """ Make sure the given 'model' is joined in the query. If 'model' isn't a parent of 'opts' or if it is None this method is a no-op. The 'alias' is the root alias for starting the join, 'seen' is a dict of model -> alias of existing joins. It must also contain a mapping of None -> some alias. This will be returned in the no-op case. 
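
        For example, with a hypothetical multi-table inheritance chain
        ``Restaurant(Place)``, resolving a ``Place`` field from a
        ``Restaurant`` query joins the parent table once and records the
        resulting alias in 'seen'.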
""" if model in seen: return seen[model] chain = opts.get_base_chain(model) if not chain: return alias curr_opts = opts for int_model in chain: if int_model in seen: curr_opts = int_model._meta alias = seen[int_model] continue # Proxy model have elements in base chain # with no parents, assign the new options # object and skip to the next base in that # case if not curr_opts.parents[int_model]: curr_opts = int_model._meta continue link_field = curr_opts.get_ancestor_link(int_model) join_info = self.setup_joins([link_field.name], curr_opts, alias) curr_opts = int_model._meta alias = seen[int_model] = join_info.joins[-1] return alias or seen[None] def add_annotation(self, annotation, alias, is_summary=False): """Add a single annotation expression to the Query.""" annotation = annotation.resolve_expression(self, allow_joins=True, reuse=None, summarize=is_summary) self.append_annotation_mask([alias]) self.annotations[alias] = annotation def resolve_expression(self, query, *args, **kwargs): clone = self.clone() # Subqueries need to use a different set of aliases than the outer query. clone.bump_prefix(query) clone.subquery = True # It's safe to drop ordering if the queryset isn't using slicing, # distinct(*fields) or select_for_update(). if (self.low_mark == 0 and self.high_mark is None and not self.distinct_fields and not self.select_for_update): clone.clear_ordering(True) clone.where.resolve_expression(query, *args, **kwargs) for key, value in clone.annotations.items(): resolved = value.resolve_expression(query, *args, **kwargs) if hasattr(resolved, 'external_aliases'): resolved.external_aliases.update(clone.alias_map) clone.annotations[key] = resolved # Outer query's aliases are considered external. clone.external_aliases.update( alias for alias, table in query.alias_map.items() if ( isinstance(table, Join) and table.join_field.related_model._meta.db_table != alias ) or ( isinstance(table, BaseTable) and table.table_name != table.table_alias ) ) return clone def as_sql(self, compiler, connection): sql, params = self.get_compiler(connection=connection).as_sql() if self.subquery: sql = '(%s)' % sql return sql, params def resolve_lookup_value(self, value, can_reuse, allow_joins, simple_col): if hasattr(value, 'resolve_expression'): kwargs = {'reuse': can_reuse, 'allow_joins': allow_joins} if isinstance(value, F): kwargs['simple_col'] = simple_col value = value.resolve_expression(self, **kwargs) elif isinstance(value, (list, tuple)): # The items of the iterable may be expressions and therefore need # to be resolved independently. for sub_value in value: if hasattr(sub_value, 'resolve_expression'): if isinstance(sub_value, F): sub_value.resolve_expression( self, reuse=can_reuse, allow_joins=allow_joins, simple_col=simple_col, ) else: sub_value.resolve_expression(self, reuse=can_reuse, allow_joins=allow_joins) return value def solve_lookup_type(self, lookup): """ Solve the lookup type from the lookup (e.g.: 'foobar__id__icontains'). """ lookup_splitted = lookup.split(LOOKUP_SEP) if self.annotations: expression, expression_lookups = refs_expression(lookup_splitted, self.annotations) if expression: return expression_lookups, (), expression _, field, _, lookup_parts = self.names_to_path(lookup_splitted, self.get_meta()) field_parts = lookup_splitted[0:len(lookup_splitted) - len(lookup_parts)] if len(lookup_parts) > 1 and not field_parts: raise FieldError( 'Invalid lookup "%s" for model %s".' 
% (lookup, self.get_meta().model.__name__) ) return lookup_parts, field_parts, False def check_query_object_type(self, value, opts, field): """ Check whether the object passed while querying is of the correct type. If not, raise a ValueError specifying the wrong object. """ if hasattr(value, '_meta'): if not check_rel_lookup_compatibility(value._meta.model, opts, field): raise ValueError( 'Cannot query "%s": Must be "%s" instance.' % (value, opts.object_name)) def check_related_objects(self, field, value, opts): """Check the type of object passed to query relations.""" if field.is_relation: # Check that the field and the queryset use the same model in a # query like .filter(author=Author.objects.all()). For example, the # opts would be Author's (from the author field) and value.model # would be Author.objects.all() queryset's .model (Author also). # The field is the related field on the lhs side. if (isinstance(value, Query) and not value.has_select_fields and not check_rel_lookup_compatibility(value.model, opts, field)): raise ValueError( 'Cannot use QuerySet for "%s": Use a QuerySet for "%s".' % (value.model._meta.object_name, opts.object_name) ) elif hasattr(value, '_meta'): self.check_query_object_type(value, opts, field) elif hasattr(value, '__iter__'): for v in value: self.check_query_object_type(v, opts, field) def build_lookup(self, lookups, lhs, rhs): """ Try to extract transforms and lookup from given lhs. The lhs value is something that works like SQLExpression. The rhs value is what the lookup is going to compare against. The lookups is a list of names to extract using get_lookup() and get_transform(). """ # __exact is the default lookup if one isn't given. *transforms, lookup_name = lookups or ['exact'] for name in transforms: lhs = self.try_transform(lhs, name) # First try get_lookup() so that the lookup takes precedence if the lhs # supports both transform and lookup for the name. lookup_class = lhs.get_lookup(lookup_name) if not lookup_class: if lhs.field.is_relation: raise FieldError('Related Field got invalid lookup: {}'.format(lookup_name)) # A lookup wasn't found. Try to interpret the name as a transform # and do an Exact lookup against it. lhs = self.try_transform(lhs, lookup_name) lookup_name = 'exact' lookup_class = lhs.get_lookup(lookup_name) if not lookup_class: return lookup = lookup_class(lhs, rhs) # Interpret '__exact=None' as the sql 'is NULL'; otherwise, reject all # uses of None as a query value unless the lookup supports it. if lookup.rhs is None and not lookup.can_use_none_as_rhs: if lookup_name not in ('exact', 'iexact'): raise ValueError("Cannot use None as a query value") return lhs.get_lookup('isnull')(lhs, True) # For Oracle '' is equivalent to null. The check must be done at this # stage because join promotion can't be done in the compiler. Using # DEFAULT_DB_ALIAS isn't nice but it's the best that can be done here. # A similar thing is done in is_nullable(), too. if (connections[DEFAULT_DB_ALIAS].features.interprets_empty_strings_as_nulls and lookup_name == 'exact' and lookup.rhs == ''): return lhs.get_lookup('isnull')(lhs, True) return lookup def try_transform(self, lhs, name): """ Helper method for build_lookup(). Try to fetch and initialize a transform for name parameter from lhs. 
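
        A sketch of the two outcomes (assuming ``col`` is a DateTimeField
        column)::

            self.try_transform(col, 'date')   # returns the registered 'date'
                                              # transform wrapping col
            self.try_transform(col, 'bogus')  # raises FieldError, suggesting
                                              # close lookup names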
""" transform_class = lhs.get_transform(name) if transform_class: return transform_class(lhs) else: output_field = lhs.output_field.__class__ suggested_lookups = difflib.get_close_matches(name, output_field.get_lookups()) if suggested_lookups: suggestion = ', perhaps you meant %s?' % ' or '.join(suggested_lookups) else: suggestion = '.' raise FieldError( "Unsupported lookup '%s' for %s or join on the field not " "permitted%s" % (name, output_field.__name__, suggestion) ) def build_filter(self, filter_expr, branch_negated=False, current_negated=False, can_reuse=None, allow_joins=True, split_subq=True, reuse_with_filtered_relation=False, simple_col=False): """ Build a WhereNode for a single filter clause but don't add it to this Query. Query.add_q() will then add this filter to the where Node. The 'branch_negated' tells us if the current branch contains any negations. This will be used to determine if subqueries are needed. The 'current_negated' is used to determine if the current filter is negated or not and this will be used to determine if IS NULL filtering is needed. The difference between current_negated and branch_negated is that branch_negated is set on first negation, but current_negated is flipped for each negation. Note that add_filter will not do any negating itself, that is done upper in the code by add_q(). The 'can_reuse' is a set of reusable joins for multijoins. If 'reuse_with_filtered_relation' is True, then only joins in can_reuse will be reused. The method will create a filter clause that can be added to the current query. However, if the filter isn't added to the query then the caller is responsible for unreffing the joins used. """ if isinstance(filter_expr, dict): raise FieldError("Cannot parse keyword query as dict") arg, value = filter_expr if not arg: raise FieldError("Cannot parse keyword query %r" % arg) lookups, parts, reffed_expression = self.solve_lookup_type(arg) if not getattr(reffed_expression, 'filterable', True): raise NotSupportedError( reffed_expression.__class__.__name__ + ' is disallowed in ' 'the filter clause.' ) if not allow_joins and len(parts) > 1: raise FieldError("Joined field references are not permitted in this query") pre_joins = self.alias_refcount.copy() value = self.resolve_lookup_value(value, can_reuse, allow_joins, simple_col) used_joins = {k for k, v in self.alias_refcount.items() if v > pre_joins.get(k, 0)} clause = self.where_class() if reffed_expression: condition = self.build_lookup(lookups, reffed_expression, value) clause.add(condition, AND) return clause, [] opts = self.get_meta() alias = self.get_initial_alias() allow_many = not branch_negated or not split_subq try: join_info = self.setup_joins( parts, opts, alias, can_reuse=can_reuse, allow_many=allow_many, reuse_with_filtered_relation=reuse_with_filtered_relation, ) # Prevent iterator from being consumed by check_related_objects() if isinstance(value, Iterator): value = list(value) self.check_related_objects(join_info.final_field, value, join_info.opts) # split_exclude() needs to know which joins were generated for the # lookup parts self._lookup_joins = join_info.joins except MultiJoin as e: return self.split_exclude(filter_expr, can_reuse, e.names_with_path) # Update used_joins before trimming since they are reused to determine # which joins could be later promoted to INNER. 
used_joins.update(join_info.joins) targets, alias, join_list = self.trim_joins(join_info.targets, join_info.joins, join_info.path) if can_reuse is not None: can_reuse.update(join_list) if join_info.final_field.is_relation: # No support for transforms for relational fields num_lookups = len(lookups) if num_lookups > 1: raise FieldError('Related Field got invalid lookup: {}'.format(lookups[0])) if len(targets) == 1: col = _get_col(targets[0], join_info.final_field, alias, simple_col) else: col = MultiColSource(alias, targets, join_info.targets, join_info.final_field) else: col = _get_col(targets[0], join_info.final_field, alias, simple_col) condition = self.build_lookup(lookups, col, value) lookup_type = condition.lookup_name clause.add(condition, AND) require_outer = lookup_type == 'isnull' and condition.rhs is True and not current_negated if current_negated and (lookup_type != 'isnull' or condition.rhs is False) and condition.rhs is not None: require_outer = True if (lookup_type != 'isnull' and ( self.is_nullable(targets[0]) or self.alias_map[join_list[-1]].join_type == LOUTER)): # The condition added here will be SQL like this: # NOT (col IS NOT NULL), where the first NOT is added in # upper layers of code. The reason for addition is that if col # is null, then col != someval will result in SQL "unknown" # which isn't the same as in Python. The Python None handling # is wanted, and it can be gotten by # (col IS NULL OR col != someval) # <=> # NOT (col IS NOT NULL AND col = someval). lookup_class = targets[0].get_lookup('isnull') col = _get_col(targets[0], join_info.targets[0], alias, simple_col) clause.add(lookup_class(col, False), AND) return clause, used_joins if not require_outer else () def add_filter(self, filter_clause): self.add_q(Q(**{filter_clause[0]: filter_clause[1]})) def add_q(self, q_object): """ A preprocessor for the internal _add_q(). Responsible for doing final join promotion. """ # For join promotion this case is doing an AND for the added q_object # and existing conditions. So, any existing inner join forces the join # type to remain inner. Existing outer joins can however be demoted. # (Consider case where rel_a is LOUTER and rel_a__col=1 is added - if # rel_a doesn't produce any rows, then the whole condition must fail. # So, demotion is OK. 
        existing_inner = {a for a in self.alias_map if self.alias_map[a].join_type == INNER}
        clause, _ = self._add_q(q_object, self.used_aliases)
        if clause:
            self.where.add(clause, AND)
        self.demote_joins(existing_inner)

    def build_where(self, q_object):
        return self._add_q(q_object, used_aliases=set(), allow_joins=False, simple_col=True)[0]

    def _add_q(self, q_object, used_aliases, branch_negated=False,
               current_negated=False, allow_joins=True, split_subq=True,
               simple_col=False):
        """Add a Q-object to the current filter."""
        connector = q_object.connector
        current_negated = current_negated ^ q_object.negated
        branch_negated = branch_negated or q_object.negated
        target_clause = self.where_class(connector=connector,
                                         negated=q_object.negated)
        joinpromoter = JoinPromoter(q_object.connector, len(q_object.children), current_negated)
        for child in q_object.children:
            if isinstance(child, Node):
                child_clause, needed_inner = self._add_q(
                    child, used_aliases, branch_negated,
                    current_negated, allow_joins, split_subq, simple_col)
                joinpromoter.add_votes(needed_inner)
            else:
                child_clause, needed_inner = self.build_filter(
                    child, can_reuse=used_aliases, branch_negated=branch_negated,
                    current_negated=current_negated, allow_joins=allow_joins,
                    split_subq=split_subq, simple_col=simple_col,
                )
                joinpromoter.add_votes(needed_inner)
            if child_clause:
                target_clause.add(child_clause, connector)
        needed_inner = joinpromoter.update_join_types(self)
        return target_clause, needed_inner

    def build_filtered_relation_q(self, q_object, reuse, branch_negated=False,
                                  current_negated=False):
        """Add a FilteredRelation object to the current filter."""
        connector = q_object.connector
        current_negated ^= q_object.negated
        branch_negated = branch_negated or q_object.negated
        target_clause = self.where_class(connector=connector, negated=q_object.negated)
        for child in q_object.children:
            if isinstance(child, Node):
                child_clause = self.build_filtered_relation_q(
                    child, reuse=reuse, branch_negated=branch_negated,
                    current_negated=current_negated,
                )
            else:
                child_clause, _ = self.build_filter(
                    child, can_reuse=reuse, branch_negated=branch_negated,
                    current_negated=current_negated,
                    allow_joins=True, split_subq=False,
                    reuse_with_filtered_relation=True,
                )
            target_clause.add(child_clause, connector)
        return target_clause

    def add_filtered_relation(self, filtered_relation, alias):
        filtered_relation.alias = alias
        lookups = dict(get_children_from_q(filtered_relation.condition))
        for lookup in chain((filtered_relation.relation_name,), lookups):
            lookup_parts, field_parts, _ = self.solve_lookup_type(lookup)
            shift = 2 if not lookup_parts else 1
            if len(field_parts) > (shift + len(lookup_parts)):
                raise ValueError(
                    "FilteredRelation's condition doesn't support nested "
                    "relations (got %r)." % lookup
                )
        self._filtered_relations[filtered_relation.alias] = filtered_relation

    def names_to_path(self, names, opts, allow_many=True, fail_on_missing=False):
        """
        Walk the list of names and turn them into PathInfo tuples. A single
        name in 'names' can generate multiple PathInfos (m2m, for example).

        'names' is the path of names to travel, 'opts' is the model Options we
        start the name resolving from, 'allow_many' is as for setup_joins().
        If fail_on_missing is set to True, then a name that can't be resolved
        will generate a FieldError.

        Return a list of PathInfo tuples. In addition return the final field
        (the last used join field) and target (which is a field guaranteed to
        contain the same value as the final field).
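
        For a hypothetical ``Book`` model, ``['author', 'name']`` would yield
        one PathInfo for the book -> author join, with Author's ``name`` field
        as both the final field and the target.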
Finally, return those names that weren't found (which are likely transforms and the final lookup). """ path, names_with_path = [], [] for pos, name in enumerate(names): cur_names_with_path = (name, []) if name == 'pk': name = opts.pk.name field = None filtered_relation = None try: field = opts.get_field(name) except FieldDoesNotExist: if name in self.annotation_select: field = self.annotation_select[name].output_field elif name in self._filtered_relations and pos == 0: filtered_relation = self._filtered_relations[name] field = opts.get_field(filtered_relation.relation_name) if field is not None: # Fields that contain one-to-many relations with a generic # model (like a GenericForeignKey) cannot generate reverse # relations and therefore cannot be used for reverse querying. if field.is_relation and not field.related_model: raise FieldError( "Field %r does not generate an automatic reverse " "relation and therefore cannot be used for reverse " "querying. If it is a GenericForeignKey, consider " "adding a GenericRelation." % name ) try: model = field.model._meta.concrete_model except AttributeError: # QuerySet.annotate() may introduce fields that aren't # attached to a model. model = None else: # We didn't find the current field, so move position back # one step. pos -= 1 if pos == -1 or fail_on_missing: available = sorted([ *get_field_names_from_opts(opts), *self.annotation_select, *self._filtered_relations, ]) raise FieldError("Cannot resolve keyword '%s' into field. " "Choices are: %s" % (name, ", ".join(available))) break # Check if we need any joins for concrete inheritance cases (the # field lives in parent, but we are currently in one of its # children) if model is not opts.model: path_to_parent = opts.get_path_to_parent(model) if path_to_parent: path.extend(path_to_parent) cur_names_with_path[1].extend(path_to_parent) opts = path_to_parent[-1].to_opts if hasattr(field, 'get_path_info'): pathinfos = field.get_path_info(filtered_relation) if not allow_many: for inner_pos, p in enumerate(pathinfos): if p.m2m: cur_names_with_path[1].extend(pathinfos[0:inner_pos + 1]) names_with_path.append(cur_names_with_path) raise MultiJoin(pos + 1, names_with_path) last = pathinfos[-1] path.extend(pathinfos) final_field = last.join_field opts = last.to_opts targets = last.target_fields cur_names_with_path[1].extend(pathinfos) names_with_path.append(cur_names_with_path) else: # Local non-relational field. final_field = field targets = (field,) if fail_on_missing and pos + 1 != len(names): raise FieldError( "Cannot resolve keyword %r into field. Join on '%s'" " not permitted." % (names[pos + 1], name)) break return path, final_field, targets, names[pos + 1:] def setup_joins(self, names, opts, alias, can_reuse=None, allow_many=True, reuse_with_filtered_relation=False): """ Compute the necessary table joins for the passage through the fields given in 'names'. 'opts' is the Options class for the current model (which gives the table we are starting from), 'alias' is the alias for the table to start the joining from. The 'can_reuse' defines the reverse foreign key joins we can reuse. It can be None in which case all joins are reusable or a set of aliases that can be reused. Note that non-reverse foreign keys are always reusable when using setup_joins(). The 'reuse_with_filtered_relation' can be used to force 'can_reuse' parameter and force the relation on the given connections. If 'allow_many' is False, then any reverse foreign key seen will generate a MultiJoin exception. 
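
        For example (hypothetical models), resolving ``['author', 'friends']``
        from a Book query sets up the book -> author join and then the
        many-to-many joins to friends; with 'allow_many' False the same path
        raises MultiJoin instead.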
Return the final field involved in the joins, the target field (used for any 'where' constraint), the final 'opts' value, the joins, the field path traveled to generate the joins, and a transform function that takes a field and alias and is equivalent to `field.get_col(alias)` in the simple case but wraps field transforms if they were included in names. The target field is the field containing the concrete value. Final field can be something different, for example foreign key pointing to that value. Final field is needed for example in some value conversions (convert 'obj' in fk__id=obj to pk val using the foreign key field for example). """ joins = [alias] # The transform can't be applied yet, as joins must be trimmed later. # To avoid making every caller of this method look up transforms # directly, compute transforms here and create a partial that converts # fields to the appropriate wrapped version. def final_transformer(field, alias): return field.get_col(alias) # Try resolving all the names as fields first. If there's an error, # treat trailing names as lookups until a field can be resolved. last_field_exception = None for pivot in range(len(names), 0, -1): try: path, final_field, targets, rest = self.names_to_path( names[:pivot], opts, allow_many, fail_on_missing=True, ) except FieldError as exc: if pivot == 1: # The first item cannot be a lookup, so it's safe # to raise the field error here. raise else: last_field_exception = exc else: # The transforms are the remaining items that couldn't be # resolved into fields. transforms = names[pivot:] break for name in transforms: def transform(field, alias, *, name, previous): try: wrapped = previous(field, alias) return self.try_transform(wrapped, name) except FieldError: # FieldError is raised if the transform doesn't exist. if isinstance(final_field, Field) and last_field_exception: raise last_field_exception else: raise final_transformer = functools.partial(transform, name=name, previous=final_transformer) # Then, add the path to the query's joins. Note that we can't trim # joins at this stage - we will need the information about join type # of the trimmed joins. for join in path: if join.filtered_relation: filtered_relation = join.filtered_relation.clone() table_alias = filtered_relation.alias else: filtered_relation = None table_alias = None opts = join.to_opts if join.direct: nullable = self.is_nullable(join.join_field) else: nullable = True connection = Join( opts.db_table, alias, table_alias, INNER, join.join_field, nullable, filtered_relation=filtered_relation, ) reuse = can_reuse if join.m2m or reuse_with_filtered_relation else None alias = self.join( connection, reuse=reuse, reuse_with_filtered_relation=reuse_with_filtered_relation, ) joins.append(alias) if filtered_relation: filtered_relation.path = joins[:] return JoinInfo(final_field, targets, opts, joins, path, final_transformer) def trim_joins(self, targets, joins, path): """ The 'target' parameter is the final field being joined to, 'joins' is the full list of join aliases. The 'path' contain the PathInfos used to create the joins. Return the final target field and table alias and the new active joins. Always trim any direct join if the target column is already in the previous table. Can't trim reverse joins as it's unknown if there's anything on the other side of the join. 
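
        For example, filtering on a foreign key's primary key
        (``book.author_id = %s``) doesn't need the author table at all: the
        direct join is trimmed because the book table already holds the
        target column.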
""" joins = joins[:] for pos, info in enumerate(reversed(path)): if len(joins) == 1 or not info.direct: break if info.filtered_relation: break join_targets = {t.column for t in info.join_field.foreign_related_fields} cur_targets = {t.column for t in targets} if not cur_targets.issubset(join_targets): break targets_dict = {r[1].column: r[0] for r in info.join_field.related_fields if r[1].column in cur_targets} targets = tuple(targets_dict[t.column] for t in targets) self.unref_alias(joins.pop()) return targets, joins[-1], joins @classmethod def _gen_col_aliases(cls, exprs): for expr in exprs: if isinstance(expr, Col): yield expr.alias else: yield from cls._gen_col_aliases(expr.get_source_expressions()) def resolve_ref(self, name, allow_joins=True, reuse=None, summarize=False, simple_col=False): if not allow_joins and LOOKUP_SEP in name: raise FieldError("Joined field references are not permitted in this query") annotation = self.annotations.get(name) if annotation is not None: if not allow_joins: for alias in self._gen_col_aliases([annotation]): if isinstance(self.alias_map[alias], Join): raise FieldError( 'Joined field references are not permitted in ' 'this query' ) if summarize: # Summarize currently means we are doing an aggregate() query # which is executed as a wrapped subquery if any of the # aggregate() elements reference an existing annotation. In # that case we need to return a Ref to the subquery's annotation. return Ref(name, self.annotation_select[name]) else: return annotation else: field_list = name.split(LOOKUP_SEP) join_info = self.setup_joins(field_list, self.get_meta(), self.get_initial_alias(), can_reuse=reuse) targets, final_alias, join_list = self.trim_joins(join_info.targets, join_info.joins, join_info.path) if not allow_joins and len(join_list) > 1: raise FieldError('Joined field references are not permitted in this query') if len(targets) > 1: raise FieldError("Referencing multicolumn fields with F() objects " "isn't supported") # Verify that the last lookup in name is a field or a transform: # transform_function() raises FieldError if not. join_info.transform_function(targets[0], final_alias) if reuse is not None: reuse.update(join_list) col = _get_col(targets[0], join_info.targets[0], join_list[-1], simple_col) return col def split_exclude(self, filter_expr, can_reuse, names_with_path): """ When doing an exclude against any kind of N-to-many relation, we need to use a subquery. This method constructs the nested query, given the original exclude filter (filter_expr) and the portion up to the first N-to-many relation field. For example, if the origin filter is ~Q(child__name='foo'), filter_expr is ('child__name', 'foo') and can_reuse is a set of joins usable for filters in the original query. We will turn this into equivalent of: WHERE NOT (pk IN (SELECT parent_id FROM thetable WHERE name = 'foo' AND parent_id IS NOT NULL)) It might be worth it to consider using WHERE NOT EXISTS as that has saner null handling, and is easier for the backend's optimizer to handle. """ filter_lhs, filter_rhs = filter_expr if isinstance(filter_rhs, F): filter_expr = (filter_lhs, OuterRef(filter_rhs.name)) # Generate the inner query. query = Query(self.model) query._filtered_relations = self._filtered_relations query.add_filter(filter_expr) query.clear_ordering(True) # Try to have as simple as possible subquery -> trim leading joins from # the subquery. 
trimmed_prefix, contains_louter = query.trim_start(names_with_path) # Add extra check to make sure the selected field will not be null # since we are adding an IN <subquery> clause. This prevents the # database from tripping over IN (...,NULL,...) selects and returning # nothing col = query.select[0] select_field = col.target alias = col.alias if self.is_nullable(select_field): lookup_class = select_field.get_lookup('isnull') lookup = lookup_class(select_field.get_col(alias), False) query.where.add(lookup, AND) if alias in can_reuse: pk = select_field.model._meta.pk # Need to add a restriction so that outer query's filters are in effect for # the subquery, too. query.bump_prefix(self) lookup_class = select_field.get_lookup('exact') # Note that the query.select[0].alias is different from alias # due to bump_prefix above. lookup = lookup_class(pk.get_col(query.select[0].alias), pk.get_col(alias)) query.where.add(lookup, AND) query.external_aliases.add(alias) condition, needed_inner = self.build_filter( ('%s__in' % trimmed_prefix, query), current_negated=True, branch_negated=True, can_reuse=can_reuse) if contains_louter: or_null_condition, _ = self.build_filter( ('%s__isnull' % trimmed_prefix, True), current_negated=True, branch_negated=True, can_reuse=can_reuse) condition.add(or_null_condition, OR) # Note that the end result will be: # (outercol NOT IN innerq AND outercol IS NOT NULL) OR outercol IS NULL. # This might look crazy but due to how IN works, this seems to be # correct. If the IS NOT NULL check is removed then outercol NOT # IN will return UNKNOWN. If the IS NULL check is removed, then if # outercol IS NULL we will not match the row. return condition, needed_inner def set_empty(self): self.where.add(NothingNode(), AND) def is_empty(self): return any(isinstance(c, NothingNode) for c in self.where.children) def set_limits(self, low=None, high=None): """ Adjust the limits on the rows retrieved. Use low/high to set these, as it makes it more Pythonic to read and write. When the SQL query is created, convert them to the appropriate offset and limit values. Apply any limits passed in here to the existing constraints. Add low to the current low value and clamp both to any existing high value. """ if high is not None: if self.high_mark is not None: self.high_mark = min(self.high_mark, self.low_mark + high) else: self.high_mark = self.low_mark + high if low is not None: if self.high_mark is not None: self.low_mark = min(self.high_mark, self.low_mark + low) else: self.low_mark = self.low_mark + low if self.low_mark == self.high_mark: self.set_empty() def clear_limits(self): """Clear any existing limits.""" self.low_mark, self.high_mark = 0, None @property def is_sliced(self): return self.low_mark != 0 or self.high_mark is not None def has_limit_one(self): return self.high_mark is not None and (self.high_mark - self.low_mark) == 1 def can_filter(self): """ Return True if adding filters to this instance is still possible. Typically, this means no limits or offsets have been put on the results. """ return not self.is_sliced def clear_select_clause(self): """Remove all fields from SELECT clause.""" self.select = () self.default_cols = False self.select_related = False self.set_extra_mask(()) self.set_annotation_mask(()) def clear_select_fields(self): """ Clear the list of fields to select (but not extra_select columns). Some queryset types completely replace any existing list of select columns. 
""" self.select = () self.values_select = () def add_select_col(self, col): self.select += col, self.values_select += col.output_field.name, def set_select(self, cols): self.default_cols = False self.select = tuple(cols) def add_distinct_fields(self, *field_names): """ Add and resolve the given fields to the query's "distinct on" clause. """ self.distinct_fields = field_names self.distinct = True def add_fields(self, field_names, allow_m2m=True): """ Add the given (model) fields to the select set. Add the field names in the order specified. """ alias = self.get_initial_alias() opts = self.get_meta() try: cols = [] for name in field_names: # Join promotion note - we must not remove any rows here, so # if there is no existing joins, use outer join. join_info = self.setup_joins(name.split(LOOKUP_SEP), opts, alias, allow_many=allow_m2m) targets, final_alias, joins = self.trim_joins( join_info.targets, join_info.joins, join_info.path, ) for target in targets: cols.append(join_info.transform_function(target, final_alias)) if cols: self.set_select(cols) except MultiJoin: raise FieldError("Invalid field name: '%s'" % name) except FieldError: if LOOKUP_SEP in name: # For lookups spanning over relationships, show the error # from the model on which the lookup failed. raise else: names = sorted([ *get_field_names_from_opts(opts), *self.extra, *self.annotation_select, *self._filtered_relations ]) raise FieldError("Cannot resolve keyword %r into field. " "Choices are: %s" % (name, ", ".join(names))) def add_ordering(self, *ordering): """ Add items from the 'ordering' sequence to the query's "order by" clause. These items are either field names (not column names) -- possibly with a direction prefix ('-' or '?') -- or OrderBy expressions. If 'ordering' is empty, clear all ordering from the query. """ errors = [] for item in ordering: if not hasattr(item, 'resolve_expression') and not ORDER_PATTERN.match(item): errors.append(item) if getattr(item, 'contains_aggregate', False): raise FieldError( 'Using an aggregate in order_by() without also including ' 'it in annotate() is not allowed: %s' % item ) if errors: raise FieldError('Invalid order_by arguments: %s' % errors) if ordering: self.order_by += ordering else: self.default_ordering = False def clear_ordering(self, force_empty): """ Remove any ordering settings. If 'force_empty' is True, there will be no ordering in the resulting query (not even the model's default). """ self.order_by = () self.extra_order_by = () if force_empty: self.default_ordering = False def set_group_by(self): """ Expand the GROUP BY clause required by the query. This will usually be the set of all non-aggregate fields in the return data. If the database backend supports grouping by the primary key, and the query would be equivalent, the optimization will be made automatically. """ group_by = list(self.select) if self.annotation_select: for alias, annotation in self.annotation_select.items(): try: inspect.getcallargs(annotation.get_group_by_cols, alias=alias) except TypeError: annotation_class = annotation.__class__ msg = ( '`alias=None` must be added to the signature of ' '%s.%s.get_group_by_cols().' 
) % (annotation_class.__module__, annotation_class.__qualname__) warnings.warn(msg, category=RemovedInDjango40Warning) group_by_cols = annotation.get_group_by_cols() else: group_by_cols = annotation.get_group_by_cols(alias=alias) group_by.extend(group_by_cols) self.group_by = tuple(group_by) def add_select_related(self, fields): """ Set up the select_related data structure so that we only select certain related models (as opposed to all models, when self.select_related=True). """ if isinstance(self.select_related, bool): field_dict = {} else: field_dict = self.select_related for field in fields: d = field_dict for part in field.split(LOOKUP_SEP): d = d.setdefault(part, {}) self.select_related = field_dict def add_extra(self, select, select_params, where, params, tables, order_by): """ Add data to the various extra_* attributes for user-created additions to the query. """ if select: # We need to pair any placeholder markers in the 'select' # dictionary with their parameters in 'select_params' so that # subsequent updates to the select dictionary also adjust the # parameters appropriately. select_pairs = {} if select_params: param_iter = iter(select_params) else: param_iter = iter([]) for name, entry in select.items(): entry = str(entry) entry_params = [] pos = entry.find("%s") while pos != -1: if pos == 0 or entry[pos - 1] != '%': entry_params.append(next(param_iter)) pos = entry.find("%s", pos + 2) select_pairs[name] = (entry, entry_params) self.extra.update(select_pairs) if where or params: self.where.add(ExtraWhere(where, params), AND) if tables: self.extra_tables += tuple(tables) if order_by: self.extra_order_by = order_by def clear_deferred_loading(self): """Remove any fields from the deferred loading set.""" self.deferred_loading = (frozenset(), True) def add_deferred_loading(self, field_names): """ Add the given list of model field names to the set of fields to exclude from loading from the database when automatic column selection is done. Add the new field names to any existing field names that are deferred (or removed from any existing field names that are marked as the only ones for immediate loading). """ # Fields on related models are stored in the literal double-underscore # format, so that we can use a set datastructure. We do the foo__bar # splitting and handling when computing the SQL column names (as part of # get_columns()). existing, defer = self.deferred_loading if defer: # Add to existing deferred names. self.deferred_loading = existing.union(field_names), True else: # Remove names from the set of any existing "immediate load" names. self.deferred_loading = existing.difference(field_names), False def add_immediate_loading(self, field_names): """ Add the given list of model field names to the set of fields to retrieve when the SQL is executed ("immediate loading" fields). The field names replace any existing immediate loading field names. If there are field names already specified for deferred loading, remove those names from the new field_names before storing the new names for immediate loading. (That is, immediate loading overrides any existing immediate values, but respects existing deferrals.) """ existing, defer = self.deferred_loading field_names = set(field_names) if 'pk' in field_names: field_names.remove('pk') field_names.add(self.get_meta().pk.name) if defer: # Remove any existing deferred names from the current set before # setting the new names. 
            self.deferred_loading = field_names.difference(existing), False
        else:
            # Replace any existing "immediate load" field names.
            self.deferred_loading = frozenset(field_names), False

    def get_loaded_field_names(self):
        """
        If any fields are marked to be deferred, return a dictionary mapping
        models to a set of names in those fields that will be loaded. If a
        model is not in the returned dictionary, none of its fields are
        deferred.

        If no fields are marked for deferral, return an empty dictionary.
        """
        # We cache this because we call this function multiple times
        # (compiler.fill_related_selections, query.iterator)
        try:
            return self._loaded_field_names_cache
        except AttributeError:
            collection = {}
            self.deferred_to_data(collection, self.get_loaded_field_names_cb)
            self._loaded_field_names_cache = collection
            return collection

    def get_loaded_field_names_cb(self, target, model, fields):
        """Callback used by get_loaded_field_names()."""
        target[model] = {f.attname for f in fields}

    def set_annotation_mask(self, names):
        """Set the mask of annotations that will be returned by the SELECT."""
        if names is None:
            self.annotation_select_mask = None
        else:
            self.annotation_select_mask = set(names)
        self._annotation_select_cache = None

    def append_annotation_mask(self, names):
        if self.annotation_select_mask is not None:
            self.set_annotation_mask(self.annotation_select_mask.union(names))

    def set_extra_mask(self, names):
        """
        Set the mask of extra select items that will be returned by SELECT.
        Don't remove them from the Query since they might be used later.
        """
        if names is None:
            self.extra_select_mask = None
        else:
            self.extra_select_mask = set(names)
        self._extra_select_cache = None

    def set_values(self, fields):
        self.select_related = False
        self.clear_deferred_loading()
        self.clear_select_fields()

        if self.group_by is True:
            self.add_fields((f.attname for f in self.model._meta.concrete_fields), False)
            self.set_group_by()
            self.clear_select_fields()

        if fields:
            field_names = []
            extra_names = []
            annotation_names = []
            if not self.extra and not self.annotations:
                # Shortcut - if there are no extra or annotations, then
                # the values() clause must be just field names.
                field_names = list(fields)
            else:
                self.default_cols = False
                for f in fields:
                    if f in self.extra_select:
                        extra_names.append(f)
                    elif f in self.annotation_select:
                        annotation_names.append(f)
                    else:
                        field_names.append(f)
            self.set_extra_mask(extra_names)
            self.set_annotation_mask(annotation_names)
        else:
            field_names = [f.attname for f in self.model._meta.concrete_fields]

        self.values_select = tuple(field_names)
        self.add_fields(field_names, True)

    @property
    def annotation_select(self):
        """
        Return the dictionary of aggregate columns that are not masked and
        should be used in the SELECT clause. Cache this result for
        performance.
        """
        if self._annotation_select_cache is not None:
            return self._annotation_select_cache
        elif not self.annotations:
            return {}
        elif self.annotation_select_mask is not None:
            self._annotation_select_cache = {
                k: v for k, v in self.annotations.items()
                if k in self.annotation_select_mask
            }
            return self._annotation_select_cache
        else:
            return self.annotations

    @property
    def extra_select(self):
        if self._extra_select_cache is not None:
            return self._extra_select_cache
        if not self.extra:
            return {}
        elif self.extra_select_mask is not None:
            self._extra_select_cache = {
                k: v for k, v in self.extra.items()
                if k in self.extra_select_mask
            }
            return self._extra_select_cache
        else:
            return self.extra

    def trim_start(self, names_with_path):
        """
        Trim joins from the start of the join path.
        The candidates for trim are the PathInfos in names_with_path
        structure that are m2m joins.

        Also set the select column so the start matches the join.

        This method is meant to be used for generating the subquery joins &
        cols in split_exclude().

        Return a lookup usable for doing outerq.filter(lookup=self) and a
        boolean indicating if the joins in the prefix contain a LEFT OUTER
        join.
        """
        all_paths = []
        for _, paths in names_with_path:
            all_paths.extend(paths)
        contains_louter = False
        # Trim and operate only on tables that were generated for
        # the lookup part of the query. That is, avoid trimming
        # joins generated for F() expressions.
        lookup_tables = [
            t for t in self.alias_map
            if t in self._lookup_joins or t == self.base_table
        ]
        for trimmed_paths, path in enumerate(all_paths):
            if path.m2m:
                break
            if self.alias_map[lookup_tables[trimmed_paths + 1]].join_type == LOUTER:
                contains_louter = True
            alias = lookup_tables[trimmed_paths]
            self.unref_alias(alias)
        # The path.join_field is a Rel, let's get the other side's field.
        join_field = path.join_field.field
        # Build the filter prefix.
        paths_in_prefix = trimmed_paths
        trimmed_prefix = []
        for name, path in names_with_path:
            if paths_in_prefix - len(path) < 0:
                break
            trimmed_prefix.append(name)
            paths_in_prefix -= len(path)
        trimmed_prefix.append(
            join_field.foreign_related_fields[0].name)
        trimmed_prefix = LOOKUP_SEP.join(trimmed_prefix)
        # Let's still see if we can trim the first join from the inner query
        # (that is, self). We can't do this for:
        # - LEFT JOINs because we would miss those rows that have nothing on
        #   the outer side,
        # - INNER JOINs from filtered relations because we would miss their
        #   filters.
        first_join = self.alias_map[lookup_tables[trimmed_paths + 1]]
        if first_join.join_type != LOUTER and not first_join.filtered_relation:
            select_fields = [r[0] for r in join_field.related_fields]
            select_alias = lookup_tables[trimmed_paths + 1]
            self.unref_alias(lookup_tables[trimmed_paths])
            extra_restriction = join_field.get_extra_restriction(
                self.where_class, None, lookup_tables[trimmed_paths + 1])
            if extra_restriction:
                self.where.add(extra_restriction, AND)
        else:
            # TODO: It might be possible to trim more joins from the start of the
            # inner query if it happens to have a longer join chain containing the
            # values in select_fields. Let's punt this one for now.
            select_fields = [r[1] for r in join_field.related_fields]
            select_alias = lookup_tables[trimmed_paths]
            # The found starting point is likely a Join instead of a BaseTable reference.
            # But the first entry in the query's FROM clause must not be a JOIN.
            for table in self.alias_map:
                if self.alias_refcount[table] > 0:
                    self.alias_map[table] = BaseTable(self.alias_map[table].table_name, table)
                    break
        self.set_select([f.get_col(select_alias) for f in select_fields])
        return trimmed_prefix, contains_louter

    def is_nullable(self, field):
        """
        Check if the given field should be treated as nullable.

        Some backends treat '' as null and Django treats such fields as
        nullable for those backends. In such situations field.null can be
        False even if we should treat the field as nullable.
        """
        # We need to use DEFAULT_DB_ALIAS here, as QuerySet does not have
        # (nor should it have) knowledge of which connection is going to be
        # used. The proper fix would be to defer all decisions where
        # is_nullable() is needed to the compiler stage, but that is not easy
        # to do currently.
        return (
            connections[DEFAULT_DB_ALIAS].features.interprets_empty_strings_as_nulls and
            field.empty_strings_allowed
        ) or field.null


def get_order_dir(field, default='ASC'):
    """
    Return the field name and direction for an order specification. For
    example, '-foo' is returned as ('foo', 'DESC').

    The 'default' param is used to indicate which way no prefix (or a '+'
    prefix) should sort. The '-' prefix always sorts the opposite way.
    """
    dirn = ORDER_DIR[default]
    if field[0] == '-':
        return field[1:], dirn[1]
    return field, dirn[0]


def add_to_dict(data, key, value):
    """
    Add "value" to the set of values for "key", whether or not "key" already
    exists.
    """
    if key in data:
        data[key].add(value)
    else:
        data[key] = {value}


def is_reverse_o2o(field):
    """
    Check if the given field is reverse-o2o. The field is expected to be some
    sort of relation field or related object.
    """
    return field.is_relation and field.one_to_one and not field.concrete


class JoinPromoter:
    """
    A class to abstract away join promotion problems for complex filter
    conditions.
    """

    def __init__(self, connector, num_children, negated):
        self.connector = connector
        self.negated = negated
        if self.negated:
            if connector == AND:
                self.effective_connector = OR
            else:
                self.effective_connector = AND
        else:
            self.effective_connector = self.connector
        self.num_children = num_children
        # Maps of table alias to how many times it is seen as required for
        # inner and/or outer joins.
        self.votes = Counter()

    def add_votes(self, votes):
        """
        Add a single vote per item to self.votes. The parameter can be any
        iterable.
        """
        self.votes.update(votes)

    def update_join_types(self, query):
        """
        Change join types so that the generated query is as efficient as
        possible, but still correct. So, change as many joins as possible
        to INNER, but don't make OUTER joins INNER if that could remove
        results from the query.
        """
        to_promote = set()
        to_demote = set()
        # The effective_connector is used so that NOT (a AND b) is treated
        # similarly to (a OR b) for join promotion.
        for table, votes in self.votes.items():
            # We must use outer joins in OR case when the join isn't contained
            # in all of the joins. Otherwise the INNER JOIN itself could remove
            # valid results. Consider the case where a model with rel_a and
            # rel_b relations is queried with rel_a__col=1 | rel_b__col=2. Now,
            # if the rel_a join doesn't produce any results (for example, a
            # reverse foreign key with no matching rows, or a null value in a
            # direct foreign key), and there is a matching row in rel_b with
            # col=2, then an INNER join to rel_a would remove a valid match
            # from the query. So, we need to promote any existing INNER to
            # LOUTER (it is possible this promotion in turn will be demoted
            # later on).
            if self.effective_connector == 'OR' and votes < self.num_children:
                to_promote.add(table)
            # If connector is AND and there is a filter that can match only
            # when there is a joinable row, then use INNER. For example, in
            # rel_a__col=1 & rel_b__col=2, if either of the rels produce NULL
            # as join output, then the col=1 or col=2 can't match (as
            # NULL=anything is always false).
            # For the OR case, if all children voted for a join to be inner,
            # then we can use INNER for the join. For example:
            # (rel_a__col__icontains=Alex | rel_a__col__icontains=Russell)
            # then if rel_a doesn't produce any rows, the whole condition
            # can't match. Hence we can safely use INNER join.
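            # Illustrative example (hypothetical models, not from the
            # original source): for Book.objects.filter(author__name='a',
            # publisher__name='b'), the connector is AND, so both the author
            # and publisher joins are demoted to INNER below -- a row that
            # fails either join could never satisfy its filter anyway.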
if self.effective_connector == 'AND' or ( self.effective_connector == 'OR' and votes == self.num_children): to_demote.add(table) # Finally, what happens in cases where we have: # (rel_a__col=1|rel_b__col=2) & rel_a__col__gte=0 # Now, we first generate the OR clause, and promote joins for it # in the first if branch above. Both rel_a and rel_b are promoted # to LOUTER joins. After that we do the AND case. The OR case # voted no inner joins but the rel_a__col__gte=0 votes inner join # for rel_a. We demote it back to INNER join (in AND case a single # vote is enough). The demotion is OK, if rel_a doesn't produce # rows, then the rel_a__col__gte=0 clause can't be true, and thus # the whole clause must be false. So, it is safe to use INNER # join. # Note that in this example we could just as well have the __gte # clause and the OR clause swapped. Or we could replace the __gte # clause with an OR clause containing rel_a__col=1|rel_a__col=2, # and again we could safely demote to INNER. query.promote_joins(to_promote) query.demote_joins(to_demote) return to_demote
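# A minimal standalone sketch (not part of Django) of how JoinPromoter's
# votes turn into join promotions and demotions. The aliases 'rel_a' and
# 'rel_b' and the _FakeQuery stub are hypothetical stand-ins for real Query
# state; the __main__ guard keeps the demo from running on import.
if __name__ == '__main__':
    class _FakeQuery:
        def __init__(self):
            self.promoted = set()
            self.demoted = set()

        def promote_joins(self, aliases):
            # Record which joins would become LEFT OUTER.
            self.promoted |= set(aliases)

        def demote_joins(self, aliases):
            # Record which joins would become INNER.
            self.demoted |= set(aliases)

    # Two OR'ed children: the first references only rel_a, the second both.
    promoter = JoinPromoter(OR, num_children=2, negated=False)
    promoter.add_votes(['rel_a'])
    promoter.add_votes(['rel_a', 'rel_b'])
    fake = _FakeQuery()
    promoter.update_join_types(fake)
    assert fake.promoted == {'rel_b'}  # missing from one child -> LOUTER
    assert fake.demoted == {'rel_a'}   # voted by every child -> INNER is safe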
e76aa5cf04a115a0de5653ee12df57a6b7dd348492355080a9a28063f2acdc87
import collections import re import warnings from itertools import chain from django.core.exceptions import EmptyResultSet, FieldError from django.db.models.constants import LOOKUP_SEP from django.db.models.expressions import OrderBy, Random, RawSQL, Ref, Value from django.db.models.functions import Cast from django.db.models.query_utils import QueryWrapper, select_related_descend from django.db.models.sql.constants import ( CURSOR, GET_ITERATOR_CHUNK_SIZE, MULTI, NO_RESULTS, ORDER_DIR, SINGLE, ) from django.db.models.sql.query import Query, get_order_dir from django.db.transaction import TransactionManagementError from django.db.utils import DatabaseError, NotSupportedError from django.utils.deprecation import RemovedInDjango31Warning from django.utils.hashable import make_hashable FORCE = object() class SQLCompiler: def __init__(self, query, connection, using): self.query = query self.connection = connection self.using = using self.quote_cache = {'*': '*'} # The select, klass_info, and annotations are needed by QuerySet.iterator() # these are set as a side-effect of executing the query. Note that we calculate # separately a list of extra select columns needed for grammatical correctness # of the query, but these columns are not included in self.select. self.select = None self.annotation_col_map = None self.klass_info = None # Multiline ordering SQL clause may appear from RawSQL. self.ordering_parts = re.compile(r'^(.*)\s(ASC|DESC)(.*)', re.MULTILINE | re.DOTALL) self._meta_ordering = None def setup_query(self): if all(self.query.alias_refcount[a] == 0 for a in self.query.alias_map): self.query.get_initial_alias() self.select, self.klass_info, self.annotation_col_map = self.get_select() self.col_count = len(self.select) def pre_sql_setup(self): """ Do any necessary class setup immediately prior to producing SQL. This is for things that can't necessarily be done in __init__ because we might not have all the pieces in place at that time. """ self.setup_query() order_by = self.get_order_by() self.where, self.having = self.query.where.split_having() extra_select = self.get_extra_select(order_by, self.select) self.has_extra_select = bool(extra_select) group_by = self.get_group_by(self.select + extra_select, order_by) return extra_select, order_by, group_by def get_group_by(self, select, order_by): """ Return a list of 2-tuples of form (sql, params). The logic of what exactly the GROUP BY clause contains is hard to describe in other words than "if it passes the test suite, then it is correct". """ # Some examples: # SomeModel.objects.annotate(Count('somecol')) # GROUP BY: all fields of the model # # SomeModel.objects.values('name').annotate(Count('somecol')) # GROUP BY: name # # SomeModel.objects.annotate(Count('somecol')).values('name') # GROUP BY: all cols of the model # # SomeModel.objects.values('name', 'pk').annotate(Count('somecol')).values('pk') # GROUP BY: name, pk # # SomeModel.objects.values('name').annotate(Count('somecol')).values('pk') # GROUP BY: name, pk # # In fact, the self.query.group_by is the minimal set to GROUP BY. It # can't be ever restricted to a smaller set, but additional columns in # HAVING, ORDER BY, and SELECT clauses are added to it. 
Unfortunately # the end result is that it is impossible to force the query to have # a chosen GROUP BY clause - you can almost do this by using the form: # .values(*wanted_cols).annotate(AnAggregate()) # but any later annotations, extra selects, values calls that # refer some column outside of the wanted_cols, order_by, or even # filter calls can alter the GROUP BY clause. # The query.group_by is either None (no GROUP BY at all), True # (group by select fields), or a list of expressions to be added # to the group by. if self.query.group_by is None: return [] expressions = [] if self.query.group_by is not True: # If the group by is set to a list (by .values() call most likely), # then we need to add everything in it to the GROUP BY clause. # Backwards compatibility hack for setting query.group_by. Remove # when we have public API way of forcing the GROUP BY clause. # Converts string references to expressions. for expr in self.query.group_by: if not hasattr(expr, 'as_sql'): expressions.append(self.query.resolve_ref(expr)) else: expressions.append(expr) # Note that even if the group_by is set, it is only the minimal # set to group by. So, we need to add cols in select, order_by, and # having into the select in any case. for expr, _, _ in select: cols = expr.get_group_by_cols() for col in cols: expressions.append(col) for expr, (sql, params, is_ref) in order_by: # Skip References to the select clause, as all expressions in the # select clause are already part of the group by. if not expr.contains_aggregate and not is_ref: expressions.extend(expr.get_source_expressions()) having_group_by = self.having.get_group_by_cols() if self.having else () for expr in having_group_by: expressions.append(expr) result = [] seen = set() expressions = self.collapse_group_by(expressions, having_group_by) for expr in expressions: sql, params = self.compile(expr) params_hash = make_hashable(params) if (sql, params_hash) not in seen: result.append((sql, params)) seen.add((sql, params_hash)) return result def collapse_group_by(self, expressions, having): # If the DB can group by primary key, then group by the primary key of # query's main model. Note that for PostgreSQL the GROUP BY clause must # include the primary key of every table, but for MySQL it is enough to # have the main table's primary key. if self.connection.features.allows_group_by_pk: # Determine if the main model's primary key is in the query. pk = None for expr in expressions: # Is this a reference to query's base table primary key? If the # expression isn't a Col-like, then skip the expression. if (getattr(expr, 'target', None) == self.query.model._meta.pk and getattr(expr, 'alias', None) == self.query.base_table): pk = expr break # If the main model's primary key is in the query, group by that # field, HAVING expressions, and expressions associated with tables # that don't have a primary key included in the grouped columns. if pk: pk_aliases = { expr.alias for expr in expressions if hasattr(expr, 'target') and expr.target.primary_key } expressions = [pk] + [ expr for expr in expressions if expr in having or ( getattr(expr, 'alias', None) is not None and expr.alias not in pk_aliases ) ] elif self.connection.features.allows_group_by_selected_pks: # Filter out all expressions associated with a table's primary key # present in the grouped columns. This is done by identifying all # tables that have their primary key included in the grouped # columns and removing non-primary key columns referring to them. 
# Unmanaged models are excluded because they could be representing # database views on which the optimization might not be allowed. pks = { expr for expr in expressions if hasattr(expr, 'target') and expr.target.primary_key and expr.target.model._meta.managed } aliases = {expr.alias for expr in pks} expressions = [ expr for expr in expressions if expr in pks or getattr(expr, 'alias', None) not in aliases ] return expressions def get_select(self): """ Return three values: - a list of 3-tuples of (expression, (sql, params), alias) - a klass_info structure, - a dictionary of annotations The (sql, params) is what the expression will produce, and alias is the "AS alias" for the column (possibly None). The klass_info structure contains the following information: - The base model of the query. - Which columns for that model are present in the query (by position of the select clause). - related_klass_infos: [f, klass_info] to descent into The annotations is a dictionary of {'attname': column position} values. """ select = [] klass_info = None annotations = {} select_idx = 0 for alias, (sql, params) in self.query.extra_select.items(): annotations[alias] = select_idx select.append((RawSQL(sql, params), alias)) select_idx += 1 assert not (self.query.select and self.query.default_cols) if self.query.default_cols: cols = self.get_default_columns() else: # self.query.select is a special case. These columns never go to # any model. cols = self.query.select if cols: select_list = [] for col in cols: select_list.append(select_idx) select.append((col, None)) select_idx += 1 klass_info = { 'model': self.query.model, 'select_fields': select_list, } for alias, annotation in self.query.annotation_select.items(): annotations[alias] = select_idx select.append((annotation, alias)) select_idx += 1 if self.query.select_related: related_klass_infos = self.get_related_selections(select) klass_info['related_klass_infos'] = related_klass_infos def get_select_from_parent(klass_info): for ki in klass_info['related_klass_infos']: if ki['from_parent']: ki['select_fields'] = (klass_info['select_fields'] + ki['select_fields']) get_select_from_parent(ki) get_select_from_parent(klass_info) ret = [] for col, alias in select: try: sql, params = self.compile(col, select_format=True) except EmptyResultSet: # Select a predicate that's always False. sql, params = '0', () ret.append((col, (sql, params), alias)) return ret, klass_info, annotations def get_order_by(self): """ Return a list of 2-tuples of form (expr, (sql, params, is_ref)) for the ORDER BY clause. The order_by clause can alter the select clause (for example it can add aliases to clauses that do not yet have one, or it can add totally new select clauses). """ if self.query.extra_order_by: ordering = self.query.extra_order_by elif not self.query.default_ordering: ordering = self.query.order_by elif self.query.order_by: ordering = self.query.order_by elif self.query.get_meta().ordering: ordering = self.query.get_meta().ordering self._meta_ordering = ordering else: ordering = [] if self.query.standard_ordering: asc, desc = ORDER_DIR['ASC'] else: asc, desc = ORDER_DIR['DESC'] order_by = [] for field in ordering: if hasattr(field, 'resolve_expression'): if isinstance(field, Value): # output_field must be resolved for constants. 
field = Cast(field, field.output_field) if not isinstance(field, OrderBy): field = field.asc() if not self.query.standard_ordering: field = field.copy() field.reverse_ordering() order_by.append((field, False)) continue if field == '?': # random order_by.append((OrderBy(Random()), False)) continue col, order = get_order_dir(field, asc) descending = order == 'DESC' if col in self.query.annotation_select: # Reference to expression in SELECT clause order_by.append(( OrderBy(Ref(col, self.query.annotation_select[col]), descending=descending), True)) continue if col in self.query.annotations: # References to an expression which is masked out of the SELECT # clause. expr = self.query.annotations[col] if isinstance(expr, Value): # output_field must be resolved for constants. expr = Cast(expr, expr.output_field) order_by.append((OrderBy(expr, descending=descending), False)) continue if '.' in field: # This came in through an extra(order_by=...) addition. Pass it # on verbatim. table, col = col.split('.', 1) order_by.append(( OrderBy( RawSQL('%s.%s' % (self.quote_name_unless_alias(table), col), []), descending=descending ), False)) continue if not self.query.extra or col not in self.query.extra: # 'col' is of the form 'field' or 'field1__field2' or # '-field1__field2__field', etc. order_by.extend(self.find_ordering_name( field, self.query.get_meta(), default_order=asc)) else: if col not in self.query.extra_select: order_by.append(( OrderBy(RawSQL(*self.query.extra[col]), descending=descending), False)) else: order_by.append(( OrderBy(Ref(col, RawSQL(*self.query.extra[col])), descending=descending), True)) result = [] seen = set() for expr, is_ref in order_by: resolved = expr.resolve_expression(self.query, allow_joins=True, reuse=None) if self.query.combinator: src = resolved.get_source_expressions()[0] # Relabel order by columns to raw numbers if this is a combined # query; necessary since the columns can't be referenced by the # fully qualified name and the simple column names may collide. for idx, (sel_expr, _, col_alias) in enumerate(self.select): if is_ref and col_alias == src.refs: src = src.source elif col_alias: continue if src == sel_expr: resolved.set_source_expressions([RawSQL('%d' % (idx + 1), ())]) break else: if col_alias: raise DatabaseError('ORDER BY term does not match any column in the result set.') # Add column used in ORDER BY clause without an alias to # the selected columns. self.query.add_select_col(src) resolved.set_source_expressions([RawSQL('%d' % len(self.query.select), ())]) sql, params = self.compile(resolved) # Don't add the same column twice, but the order direction is # not taken into account so we strip it. When this entire method # is refactored into expressions, then we can check each part as we # generate it. 
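            # Illustrative example (hypothetical SQL): for
            # sql = '"app_author"."name" DESC', ordering_parts captures
            # '"app_author"."name"', so an ASC and a DESC ordering on the
            # same column collapse to a single entry in `seen` below.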
without_ordering = self.ordering_parts.search(sql).group(1) params_hash = make_hashable(params) if (without_ordering, params_hash) in seen: continue seen.add((without_ordering, params_hash)) result.append((resolved, (sql, params, is_ref))) return result def get_extra_select(self, order_by, select): extra_select = [] if self.query.distinct and not self.query.distinct_fields: select_sql = [t[1] for t in select] for expr, (sql, params, is_ref) in order_by: without_ordering = self.ordering_parts.search(sql).group(1) if not is_ref and (without_ordering, params) not in select_sql: extra_select.append((expr, (without_ordering, params), None)) return extra_select def quote_name_unless_alias(self, name): """ A wrapper around connection.ops.quote_name that doesn't quote aliases for table names. This avoids problems with some SQL dialects that treat quoted strings specially (e.g. PostgreSQL). """ if name in self.quote_cache: return self.quote_cache[name] if ((name in self.query.alias_map and name not in self.query.table_map) or name in self.query.extra_select or ( name in self.query.external_aliases and name not in self.query.table_map)): self.quote_cache[name] = name return name r = self.connection.ops.quote_name(name) self.quote_cache[name] = r return r def compile(self, node, select_format=False): vendor_impl = getattr(node, 'as_' + self.connection.vendor, None) if vendor_impl: sql, params = vendor_impl(self, self.connection) else: sql, params = node.as_sql(self, self.connection) if select_format is FORCE or (select_format and not self.query.subquery): return node.output_field.select_format(self, sql, params) return sql, params def get_combinator_sql(self, combinator, all): features = self.connection.features compilers = [ query.get_compiler(self.using, self.connection) for query in self.query.combined_queries if not query.is_empty() ] if not features.supports_slicing_ordering_in_compound: for query, compiler in zip(self.query.combined_queries, compilers): if query.low_mark or query.high_mark: raise DatabaseError('LIMIT/OFFSET not allowed in subqueries of compound statements.') if compiler.get_order_by(): raise DatabaseError('ORDER BY not allowed in subqueries of compound statements.') parts = () for compiler in compilers: try: # If the columns list is limited, then all combined queries # must have the same columns list. Set the selects defined on # the query on all combined queries, if not already set. if not compiler.query.values_select and self.query.values_select: compiler.query = compiler.query.clone() compiler.query.set_values(( *self.query.extra_select, *self.query.values_select, *self.query.annotation_select, )) part_sql, part_args = compiler.as_sql() if compiler.query.combinator: # Wrap in a subquery if wrapping in parentheses isn't # supported. if not features.supports_parentheses_in_compound: part_sql = 'SELECT * FROM ({})'.format(part_sql) # Add parentheses when combining with compound query if not # already added for all compound queries. elif not features.supports_slicing_ordering_in_compound: part_sql = '({})'.format(part_sql) parts += ((part_sql, part_args),) except EmptyResultSet: # Omit the empty queryset with UNION and with DIFFERENCE if the # first queryset is nonempty. 
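                # Illustrative example (hypothetical querysets): in
                # qs1.union(qs2), an always-empty qs2 contributes no rows, so
                # its part is simply dropped; in qs1.difference(qs2), an empty
                # qs2 is droppable once a first part exists (subtracting
                # nothing is a no-op), but an empty first operand makes the
                # whole result empty, so the exception is re-raised.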
if combinator == 'union' or (combinator == 'difference' and parts): continue raise if not parts: raise EmptyResultSet combinator_sql = self.connection.ops.set_operators[combinator] if all and combinator == 'union': combinator_sql += ' ALL' braces = '({})' if features.supports_slicing_ordering_in_compound else '{}' sql_parts, args_parts = zip(*((braces.format(sql), args) for sql, args in parts)) result = [' {} '.format(combinator_sql).join(sql_parts)] params = [] for part in args_parts: params.extend(part) return result, params def as_sql(self, with_limits=True, with_col_aliases=False): """ Create the SQL for this query. Return the SQL string and list of parameters. If 'with_limits' is False, any limit/offset information is not included in the query. """ refcounts_before = self.query.alias_refcount.copy() try: extra_select, order_by, group_by = self.pre_sql_setup() for_update_part = None # Is a LIMIT/OFFSET clause needed? with_limit_offset = with_limits and (self.query.high_mark is not None or self.query.low_mark) combinator = self.query.combinator features = self.connection.features if combinator: if not getattr(features, 'supports_select_{}'.format(combinator)): raise NotSupportedError('{} is not supported on this database backend.'.format(combinator)) result, params = self.get_combinator_sql(combinator, self.query.combinator_all) else: distinct_fields, distinct_params = self.get_distinct() # This must come after 'select', 'ordering', and 'distinct' # (see docstring of get_from_clause() for details). from_, f_params = self.get_from_clause() where, w_params = self.compile(self.where) if self.where is not None else ("", []) having, h_params = self.compile(self.having) if self.having is not None else ("", []) result = ['SELECT'] params = [] if self.query.distinct: distinct_result, distinct_params = self.connection.ops.distinct_sql( distinct_fields, distinct_params, ) result += distinct_result params += distinct_params out_cols = [] col_idx = 1 for _, (s_sql, s_params), alias in self.select + extra_select: if alias: s_sql = '%s AS %s' % (s_sql, self.connection.ops.quote_name(alias)) elif with_col_aliases: s_sql = '%s AS %s' % (s_sql, 'Col%d' % col_idx) col_idx += 1 params.extend(s_params) out_cols.append(s_sql) result += [', '.join(out_cols), 'FROM', *from_] params.extend(f_params) if self.query.select_for_update and self.connection.features.has_select_for_update: if self.connection.get_autocommit(): raise TransactionManagementError('select_for_update cannot be used outside of a transaction.') if with_limit_offset and not self.connection.features.supports_select_for_update_with_limit: raise NotSupportedError( 'LIMIT/OFFSET is not supported with ' 'select_for_update on this database backend.' ) nowait = self.query.select_for_update_nowait skip_locked = self.query.select_for_update_skip_locked of = self.query.select_for_update_of # If it's a NOWAIT/SKIP LOCKED/OF query but the backend # doesn't support it, raise NotSupportedError to prevent a # possible deadlock. 
if nowait and not self.connection.features.has_select_for_update_nowait: raise NotSupportedError('NOWAIT is not supported on this database backend.') elif skip_locked and not self.connection.features.has_select_for_update_skip_locked: raise NotSupportedError('SKIP LOCKED is not supported on this database backend.') elif of and not self.connection.features.has_select_for_update_of: raise NotSupportedError('FOR UPDATE OF is not supported on this database backend.') for_update_part = self.connection.ops.for_update_sql( nowait=nowait, skip_locked=skip_locked, of=self.get_select_for_update_of_arguments(), ) if for_update_part and self.connection.features.for_update_after_from: result.append(for_update_part) if where: result.append('WHERE %s' % where) params.extend(w_params) grouping = [] for g_sql, g_params in group_by: grouping.append(g_sql) params.extend(g_params) if grouping: if distinct_fields: raise NotImplementedError('annotate() + distinct(fields) is not implemented.') order_by = order_by or self.connection.ops.force_no_ordering() result.append('GROUP BY %s' % ', '.join(grouping)) if self._meta_ordering: # When the deprecation ends, replace with: # order_by = None warnings.warn( "%s QuerySet won't use Meta.ordering in Django 3.1. " "Add .order_by(%s) to retain the current query." % ( self.query.model.__name__, ', '.join(repr(f) for f in self._meta_ordering), ), RemovedInDjango31Warning, stacklevel=4, ) if having: result.append('HAVING %s' % having) params.extend(h_params) if self.query.explain_query: result.insert(0, self.connection.ops.explain_query_prefix( self.query.explain_format, **self.query.explain_options )) if order_by: ordering = [] for _, (o_sql, o_params, _) in order_by: ordering.append(o_sql) params.extend(o_params) result.append('ORDER BY %s' % ', '.join(ordering)) if with_limit_offset: result.append(self.connection.ops.limit_offset_sql(self.query.low_mark, self.query.high_mark)) if for_update_part and not self.connection.features.for_update_after_from: result.append(for_update_part) if self.query.subquery and extra_select: # If the query is used as a subquery, the extra selects would # result in more columns than the left-hand side expression is # expecting. This can happen when a subquery uses a combination # of order_by() and distinct(), forcing the ordering expressions # to be selected as well. Wrap the query in another subquery # to exclude extraneous selects. sub_selects = [] sub_params = [] for index, (select, _, alias) in enumerate(self.select, start=1): if not alias and with_col_aliases: alias = 'col%d' % index if alias: sub_selects.append("%s.%s" % ( self.connection.ops.quote_name('subquery'), self.connection.ops.quote_name(alias), )) else: select_clone = select.relabeled_clone({select.alias: 'subquery'}) subselect, subparams = select_clone.as_sql(self, self.connection) sub_selects.append(subselect) sub_params.extend(subparams) return 'SELECT %s FROM (%s) subquery' % ( ', '.join(sub_selects), ' '.join(result), ), tuple(sub_params + params) return ' '.join(result), tuple(params) finally: # Finally do cleanup - get rid of the joins we created above. self.query.reset_refcounts(refcounts_before) def get_default_columns(self, start_alias=None, opts=None, from_parent=None): """ Compute the default columns for selecting every field in the base model. Will sometimes be called to pull in related models (e.g. via select_related), in which case "opts" and "start_alias" will be given to provide a starting point for the traversal. 
        Return a list of column expressions for all the fields being
        selected, suitable for inclusion in the query's SELECT clause.
        """
        result = []
        if opts is None:
            opts = self.query.get_meta()
        only_load = self.deferred_to_columns()
        start_alias = start_alias or self.query.get_initial_alias()
        # The 'seen_models' is used to optimize checking the needed parent
        # alias for a given field. This also includes None -> start_alias to
        # be used by local fields.
        seen_models = {None: start_alias}

        for field in opts.concrete_fields:
            model = field.model._meta.concrete_model
            # A proxy model will have a different model and concrete_model. We
            # will assign None if the field belongs to this model.
            if model == opts.model:
                model = None
            if from_parent and model is not None and issubclass(
                    from_parent._meta.concrete_model, model._meta.concrete_model):
                # Avoid loading data for already loaded parents.
                # We end up here in the case select_related() resolution
                # proceeds from parent model to child model. In that case the
                # parent model data is already present in the SELECT clause,
                # and we want to avoid reloading the same data again.
                continue
            if field.model in only_load and field.attname not in only_load[field.model]:
                continue
            alias = self.query.join_parent_model(opts, model, start_alias,
                                                 seen_models)
            column = field.get_col(alias)
            result.append(column)
        return result

    def get_distinct(self):
        """
        Return a quoted list of fields to use in the DISTINCT ON part of the
        query.

        This method can alter the tables in the query, and thus it must be
        called before get_from_clause().
        """
        result = []
        params = []
        opts = self.query.get_meta()

        for name in self.query.distinct_fields:
            parts = name.split(LOOKUP_SEP)
            _, targets, alias, joins, path, _, transform_function = self._setup_joins(parts, opts, None)
            targets, alias, _ = self.query.trim_joins(targets, joins, path)
            for target in targets:
                if name in self.query.annotation_select:
                    result.append(name)
                else:
                    r, p = self.compile(transform_function(target, alias))
                    result.append(r)
                    params.append(p)
        return result, params

    def find_ordering_name(self, name, opts, alias=None, default_order='ASC',
                           already_seen=None):
        """
        Return the table alias (the name might be ambiguous, the alias will
        not be) and column name for ordering by the given 'name' parameter.
        The 'name' is of the form 'field1__field2__...__fieldN'.
        """
        name, order = get_order_dir(name, default_order)
        descending = order == 'DESC'
        pieces = name.split(LOOKUP_SEP)
        field, targets, alias, joins, path, opts, transform_function = self._setup_joins(pieces, opts, alias)

        # If we get to this point and the field is a relation to another model,
        # append the default ordering for that model unless the attribute name
        # of the field is specified.
        if field.is_relation and opts.ordering and getattr(field, 'attname', None) != name:
            # Firstly, avoid infinite loops.
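            # Illustrative example (hypothetical models): if Book's
            # Meta.ordering is ['author'] and Author's Meta.ordering is
            # ['favorite_book'], expanding the related ordering would bounce
            # between the two models forever; the join_tuple bookkeeping
            # below spots the revisit and raises FieldError instead.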
            already_seen = already_seen or set()
            join_tuple = tuple(getattr(self.query.alias_map[j], 'join_cols', None) for j in joins)
            if join_tuple in already_seen:
                raise FieldError('Infinite loop caused by ordering.')
            already_seen.add(join_tuple)

            results = []
            for item in opts.ordering:
                if hasattr(item, 'resolve_expression') and not isinstance(item, OrderBy):
                    item = item.desc() if descending else item.asc()
                if isinstance(item, OrderBy):
                    results.append((item, False))
                    continue
                results.extend(self.find_ordering_name(item, opts, alias,
                                                       order, already_seen))
            return results
        targets, alias, _ = self.query.trim_joins(targets, joins, path)
        return [(OrderBy(transform_function(t, alias), descending=descending), False) for t in targets]

    def _setup_joins(self, pieces, opts, alias):
        """
        Helper method for get_order_by() and get_distinct().

        get_order_by() and get_distinct() must produce the same target
        columns from the same input, as their prefixes must match. Executing
        SQL where this is not true is an error.
        """
        alias = alias or self.query.get_initial_alias()
        field, targets, opts, joins, path, transform_function = self.query.setup_joins(pieces, opts, alias)
        alias = joins[-1]
        return field, targets, alias, joins, path, opts, transform_function

    def get_from_clause(self):
        """
        Return a list of strings that are joined together to go after the
        "FROM" part of the query, as well as a list of any extra parameters
        that need to be included. Subclasses can override this to create a
        from-clause via a "select".

        This should only be called after any SQL construction methods that
        might change the tables that are needed. This means the select columns,
        ordering, and distinct must be done first.
        """
        result = []
        params = []
        for alias in tuple(self.query.alias_map):
            if not self.query.alias_refcount[alias]:
                continue
            try:
                from_clause = self.query.alias_map[alias]
            except KeyError:
                # Extra tables can end up in self.tables, but not in the
                # alias_map if they aren't in a join. That's OK. We skip them.
                continue
            clause_sql, clause_params = self.compile(from_clause)
            result.append(clause_sql)
            params.extend(clause_params)
        for t in self.query.extra_tables:
            alias, _ = self.query.table_alias(t)
            # Only add the alias if it's not already present (the table_alias()
            # call increments the refcount, so an alias refcount of one means
            # this is the only reference).
            if alias not in self.query.alias_map or self.query.alias_refcount[alias] == 1:
                result.append(', %s' % self.quote_name_unless_alias(alias))
        return result, params

    def get_related_selections(self, select, opts=None, root_alias=None, cur_depth=1,
                               requested=None, restricted=None):
        """
        Fill in the information needed for a select_related query. The current
        depth is measured as the number of connections away from the root model
        (for example, cur_depth=1 means we are looking at models with direct
        connections to the root model).
        """
        def _get_field_choices():
            direct_choices = (f.name for f in opts.fields if f.is_relation)
            reverse_choices = (
                f.field.related_query_name()
                for f in opts.related_objects if f.field.unique
            )
            return chain(direct_choices, reverse_choices, self.query._filtered_relations)

        related_klass_infos = []
        if not restricted and cur_depth > self.query.max_depth:
            # We've recursed far enough; bail out.
            return related_klass_infos

        if not opts:
            opts = self.query.get_meta()
            root_alias = self.query.get_initial_alias()
        only_load = self.query.get_loaded_field_names()

        # Setup for the case when only particular related fields should be
        # included in the related selection.
fields_found = set() if requested is None: restricted = isinstance(self.query.select_related, dict) if restricted: requested = self.query.select_related def get_related_klass_infos(klass_info, related_klass_infos): klass_info['related_klass_infos'] = related_klass_infos for f in opts.fields: field_model = f.model._meta.concrete_model fields_found.add(f.name) if restricted: next = requested.get(f.name, {}) if not f.is_relation: # If a non-related field is used like a relation, # or if a single non-relational field is given. if next or f.name in requested: raise FieldError( "Non-relational field given in select_related: '%s'. " "Choices are: %s" % ( f.name, ", ".join(_get_field_choices()) or '(none)', ) ) else: next = False if not select_related_descend(f, restricted, requested, only_load.get(field_model)): continue klass_info = { 'model': f.remote_field.model, 'field': f, 'reverse': False, 'local_setter': f.set_cached_value, 'remote_setter': f.remote_field.set_cached_value if f.unique else lambda x, y: None, 'from_parent': False, } related_klass_infos.append(klass_info) select_fields = [] _, _, _, joins, _, _ = self.query.setup_joins( [f.name], opts, root_alias) alias = joins[-1] columns = self.get_default_columns(start_alias=alias, opts=f.remote_field.model._meta) for col in columns: select_fields.append(len(select)) select.append((col, None)) klass_info['select_fields'] = select_fields next_klass_infos = self.get_related_selections( select, f.remote_field.model._meta, alias, cur_depth + 1, next, restricted) get_related_klass_infos(klass_info, next_klass_infos) if restricted: related_fields = [ (o.field, o.related_model) for o in opts.related_objects if o.field.unique and not o.many_to_many ] for f, model in related_fields: if not select_related_descend(f, restricted, requested, only_load.get(model), reverse=True): continue related_field_name = f.related_query_name() fields_found.add(related_field_name) join_info = self.query.setup_joins([related_field_name], opts, root_alias) alias = join_info.joins[-1] from_parent = issubclass(model, opts.model) and model is not opts.model klass_info = { 'model': model, 'field': f, 'reverse': True, 'local_setter': f.remote_field.set_cached_value, 'remote_setter': f.set_cached_value, 'from_parent': from_parent, } related_klass_infos.append(klass_info) select_fields = [] columns = self.get_default_columns( start_alias=alias, opts=model._meta, from_parent=opts.model) for col in columns: select_fields.append(len(select)) select.append((col, None)) klass_info['select_fields'] = select_fields next = requested.get(f.related_query_name(), {}) next_klass_infos = self.get_related_selections( select, model._meta, alias, cur_depth + 1, next, restricted) get_related_klass_infos(klass_info, next_klass_infos) for name in list(requested): # Filtered relations work only on the topmost level. if cur_depth > 1: break if name in self.query._filtered_relations: fields_found.add(name) f, _, join_opts, joins, _, _ = self.query.setup_joins([name], opts, root_alias) model = join_opts.model alias = joins[-1] from_parent = issubclass(model, opts.model) and model is not opts.model def local_setter(obj, from_obj): # Set a reverse fk object when relation is non-empty. 
if from_obj: f.remote_field.set_cached_value(from_obj, obj) def remote_setter(obj, from_obj): setattr(from_obj, name, obj) klass_info = { 'model': model, 'field': f, 'reverse': True, 'local_setter': local_setter, 'remote_setter': remote_setter, 'from_parent': from_parent, } related_klass_infos.append(klass_info) select_fields = [] columns = self.get_default_columns( start_alias=alias, opts=model._meta, from_parent=opts.model, ) for col in columns: select_fields.append(len(select)) select.append((col, None)) klass_info['select_fields'] = select_fields next_requested = requested.get(name, {}) next_klass_infos = self.get_related_selections( select, opts=model._meta, root_alias=alias, cur_depth=cur_depth + 1, requested=next_requested, restricted=restricted, ) get_related_klass_infos(klass_info, next_klass_infos) fields_not_found = set(requested).difference(fields_found) if fields_not_found: invalid_fields = ("'%s'" % s for s in fields_not_found) raise FieldError( 'Invalid field name(s) given in select_related: %s. ' 'Choices are: %s' % ( ', '.join(invalid_fields), ', '.join(_get_field_choices()) or '(none)', ) ) return related_klass_infos def get_select_for_update_of_arguments(self): """ Return a quoted list of arguments for the SELECT FOR UPDATE OF part of the query. """ def _get_field_choices(): """Yield all allowed field paths in breadth-first search order.""" queue = collections.deque([(None, self.klass_info)]) while queue: parent_path, klass_info = queue.popleft() if parent_path is None: path = [] yield 'self' else: field = klass_info['field'] if klass_info['reverse']: field = field.remote_field path = parent_path + [field.name] yield LOOKUP_SEP.join(path) queue.extend( (path, klass_info) for klass_info in klass_info.get('related_klass_infos', []) ) result = [] invalid_names = [] for name in self.query.select_for_update_of: parts = [] if name == 'self' else name.split(LOOKUP_SEP) klass_info = self.klass_info for part in parts: for related_klass_info in klass_info.get('related_klass_infos', []): field = related_klass_info['field'] if related_klass_info['reverse']: field = field.remote_field if field.name == part: klass_info = related_klass_info break else: klass_info = None break if klass_info is None: invalid_names.append(name) continue select_index = klass_info['select_fields'][0] col = self.select[select_index][0] if self.connection.features.select_for_update_of_column: result.append(self.compile(col)[0]) else: result.append(self.quote_name_unless_alias(col.alias)) if invalid_names: raise FieldError( 'Invalid field name(s) given in select_for_update(of=(...)): %s. ' 'Only relational fields followed in the query are allowed. ' 'Choices are: %s.' % ( ', '.join(invalid_names), ', '.join(_get_field_choices()), ) ) return result def deferred_to_columns(self): """ Convert the self.deferred_loading data structure to mapping of table names to sets of column names which are to be loaded. Return the dictionary. 
""" columns = {} self.query.deferred_to_data(columns, self.query.get_loaded_field_names_cb) return columns def get_converters(self, expressions): converters = {} for i, expression in enumerate(expressions): if expression: backend_converters = self.connection.ops.get_db_converters(expression) field_converters = expression.get_db_converters(self.connection) if backend_converters or field_converters: converters[i] = (backend_converters + field_converters, expression) return converters def apply_converters(self, rows, converters): connection = self.connection converters = list(converters.items()) for row in map(list, rows): for pos, (convs, expression) in converters: value = row[pos] for converter in convs: value = converter(value, expression, connection) row[pos] = value yield row def results_iter(self, results=None, tuple_expected=False, chunked_fetch=False, chunk_size=GET_ITERATOR_CHUNK_SIZE): """Return an iterator over the results from executing this query.""" if results is None: results = self.execute_sql(MULTI, chunked_fetch=chunked_fetch, chunk_size=chunk_size) fields = [s[0] for s in self.select[0:self.col_count]] converters = self.get_converters(fields) rows = chain.from_iterable(results) if converters: rows = self.apply_converters(rows, converters) if tuple_expected: rows = map(tuple, rows) return rows def has_results(self): """ Backends (e.g. NoSQL) can override this in order to use optimized versions of "query has any results." """ # This is always executed on a query clone, so we can modify self.query self.query.add_extra({'a': 1}, None, None, None, None, None) self.query.set_extra_mask(['a']) return bool(self.execute_sql(SINGLE)) def execute_sql(self, result_type=MULTI, chunked_fetch=False, chunk_size=GET_ITERATOR_CHUNK_SIZE): """ Run the query against the database and return the result(s). The return value is a single data item if result_type is SINGLE, or an iterator over the results if the result_type is MULTI. result_type is either MULTI (use fetchmany() to retrieve all rows), SINGLE (only retrieve a single row), or None. In this last case, the cursor is returned if any query is executed, since it's used by subclasses such as InsertQuery). It's possible, however, that no query is needed, as the filters describe an empty set. In that case, None is returned, to avoid any unnecessary database interaction. """ result_type = result_type or NO_RESULTS try: sql, params = self.as_sql() if not sql: raise EmptyResultSet except EmptyResultSet: if result_type == MULTI: return iter([]) else: return if chunked_fetch: cursor = self.connection.chunked_cursor() else: cursor = self.connection.cursor() try: cursor.execute(sql, params) except Exception: # Might fail for server-side cursors (e.g. connection closed) cursor.close() raise if result_type == CURSOR: # Give the caller the cursor to process and close. return cursor if result_type == SINGLE: try: val = cursor.fetchone() if val: return val[0:self.col_count] return val finally: # done with the cursor cursor.close() if result_type == NO_RESULTS: cursor.close() return result = cursor_iter( cursor, self.connection.features.empty_fetchmany_value, self.col_count if self.has_extra_select else None, chunk_size, ) if not chunked_fetch or not self.connection.features.can_use_chunked_reads: try: # If we are using non-chunked reads, we return the same data # structure as normally, but ensure it is all read into memory # before going any further. Use chunked_fetch if requested, # unless the database doesn't support it. 
return list(result) finally: # done with the cursor cursor.close() return result def as_subquery_condition(self, alias, columns, compiler): qn = compiler.quote_name_unless_alias qn2 = self.connection.ops.quote_name for index, select_col in enumerate(self.query.select): lhs_sql, lhs_params = self.compile(select_col) rhs = '%s.%s' % (qn(alias), qn2(columns[index])) self.query.where.add( QueryWrapper('%s = %s' % (lhs_sql, rhs), lhs_params), 'AND') sql, params = self.as_sql() return 'EXISTS (%s)' % sql, params def explain_query(self): result = list(self.execute_sql()) # Some backends return 1 item tuples with strings, and others return # tuples with integers and strings. Flatten them out into strings. for row in result[0]: if not isinstance(row, str): yield ' '.join(str(c) for c in row) else: yield row class SQLInsertCompiler(SQLCompiler): return_id = False def field_as_sql(self, field, val): """ Take a field and a value intended to be saved on that field, and return placeholder SQL and accompanying params. Check for raw values, expressions, and fields with get_placeholder() defined in that order. When field is None, consider the value raw and use it as the placeholder, with no corresponding parameters returned. """ if field is None: # A field value of None means the value is raw. sql, params = val, [] elif hasattr(val, 'as_sql'): # This is an expression, let's compile it. sql, params = self.compile(val) elif hasattr(field, 'get_placeholder'): # Some fields (e.g. geo fields) need special munging before # they can be inserted. sql, params = field.get_placeholder(val, self, self.connection), [val] else: # Return the common case for the placeholder sql, params = '%s', [val] # The following hook is only used by Oracle Spatial, which sometimes # needs to yield 'NULL' and [] as its placeholder and params instead # of '%s' and [None]. The 'NULL' placeholder is produced earlier by # OracleOperations.get_geom_placeholder(). The following line removes # the corresponding None parameter. See ticket #10888. params = self.connection.ops.modify_insert_params(sql, params) return sql, params def prepare_value(self, field, value): """ Prepare a value to be used in a query by resolving it if it is an expression and otherwise calling the field's get_db_prep_save(). """ if hasattr(value, 'resolve_expression'): value = value.resolve_expression(self.query, allow_joins=False, for_save=True) # Don't allow values containing Col expressions. They refer to # existing columns on a row, but in the case of insert the row # doesn't exist yet. if value.contains_column_references: raise ValueError( 'Failed to insert expression "%s" on %s. F() expressions ' 'can only be used to update, not to insert.' % (value, field) ) if value.contains_aggregate: raise FieldError( 'Aggregate functions are not allowed in this query ' '(%s=%r).' % (field.name, value) ) if value.contains_over_clause: raise FieldError( 'Window expressions are not allowed in this query (%s=%r).' % (field.name, value) ) else: value = field.get_db_prep_save(value, connection=self.connection) return value def pre_save_val(self, field, obj): """ Get the given field's value off the given obj. pre_save() is used for things like auto_now on DateTimeField. Skip it if this is a raw query. """ if self.query.raw: return getattr(obj, field.attname) return field.pre_save(obj, add=True) def assemble_as_sql(self, fields, value_rows): """ Take a sequence of N fields and a sequence of M rows of values, and generate placeholder SQL and parameters for each field and value. 
Return a pair containing: * a sequence of M rows of N SQL placeholder strings, and * a sequence of M rows of corresponding parameter values. Each placeholder string may contain any number of '%s' interpolation strings, and each parameter row will contain exactly as many params as the total number of '%s's in the corresponding placeholder row. """ if not value_rows: return [], [] # list of (sql, [params]) tuples for each object to be saved # Shape: [n_objs][n_fields][2] rows_of_fields_as_sql = ( (self.field_as_sql(field, v) for field, v in zip(fields, row)) for row in value_rows ) # tuple like ([sqls], [[params]s]) for each object to be saved # Shape: [n_objs][2][n_fields] sql_and_param_pair_rows = (zip(*row) for row in rows_of_fields_as_sql) # Extract separate lists for placeholders and params. # Each of these has shape [n_objs][n_fields] placeholder_rows, param_rows = zip(*sql_and_param_pair_rows) # Params for each field are still lists, and need to be flattened. param_rows = [[p for ps in row for p in ps] for row in param_rows] return placeholder_rows, param_rows def as_sql(self): # We don't need quote_name_unless_alias() here, since these are all # going to be column names (so we can avoid the extra overhead). qn = self.connection.ops.quote_name opts = self.query.get_meta() insert_statement = self.connection.ops.insert_statement(ignore_conflicts=self.query.ignore_conflicts) result = ['%s %s' % (insert_statement, qn(opts.db_table))] fields = self.query.fields or [opts.pk] result.append('(%s)' % ', '.join(qn(f.column) for f in fields)) if self.query.fields: value_rows = [ [self.prepare_value(field, self.pre_save_val(field, obj)) for field in fields] for obj in self.query.objs ] else: # An empty object. value_rows = [[self.connection.ops.pk_default_value()] for _ in self.query.objs] fields = [None] # Currently the backends just accept values when generating bulk # queries and generate their own placeholders. Doing that isn't # necessary and it should be possible to use placeholders and # expressions in bulk inserts too. can_bulk = (not self.return_id and self.connection.features.has_bulk_insert) placeholder_rows, param_rows = self.assemble_as_sql(fields, value_rows) ignore_conflicts_suffix_sql = self.connection.ops.ignore_conflicts_suffix_sql( ignore_conflicts=self.query.ignore_conflicts ) if self.return_id and self.connection.features.can_return_columns_from_insert: if self.connection.features.can_return_rows_from_bulk_insert: result.append(self.connection.ops.bulk_insert_sql(fields, placeholder_rows)) params = param_rows else: result.append("VALUES (%s)" % ", ".join(placeholder_rows[0])) params = [param_rows[0]] if ignore_conflicts_suffix_sql: result.append(ignore_conflicts_suffix_sql) col = "%s.%s" % (qn(opts.db_table), qn(opts.pk.column)) r_fmt, r_params = self.connection.ops.return_insert_id(opts.pk) # Skip empty r_fmt to allow subclasses to customize behavior for # 3rd party backends. Refs #19096. 
if r_fmt: result.append(r_fmt % col) params += [r_params] return [(" ".join(result), tuple(chain.from_iterable(params)))] if can_bulk: result.append(self.connection.ops.bulk_insert_sql(fields, placeholder_rows)) if ignore_conflicts_suffix_sql: result.append(ignore_conflicts_suffix_sql) return [(" ".join(result), tuple(p for ps in param_rows for p in ps))] else: if ignore_conflicts_suffix_sql: result.append(ignore_conflicts_suffix_sql) return [ (" ".join(result + ["VALUES (%s)" % ", ".join(p)]), vals) for p, vals in zip(placeholder_rows, param_rows) ] def execute_sql(self, return_id=False): assert not ( return_id and len(self.query.objs) != 1 and not self.connection.features.can_return_rows_from_bulk_insert ) self.return_id = return_id with self.connection.cursor() as cursor: for sql, params in self.as_sql(): cursor.execute(sql, params) if not return_id: return if self.connection.features.can_return_rows_from_bulk_insert and len(self.query.objs) > 1: return self.connection.ops.fetch_returned_insert_ids(cursor) if self.connection.features.can_return_columns_from_insert: assert len(self.query.objs) == 1 return self.connection.ops.fetch_returned_insert_id(cursor) return self.connection.ops.last_insert_id( cursor, self.query.get_meta().db_table, self.query.get_meta().pk.column ) class SQLDeleteCompiler(SQLCompiler): def as_sql(self): """ Create the SQL for this query. Return the SQL string and list of parameters. """ assert len([t for t in self.query.alias_map if self.query.alias_refcount[t] > 0]) == 1, \ "Can only delete from one table at a time." qn = self.quote_name_unless_alias result = ['DELETE FROM %s' % qn(self.query.base_table)] where, params = self.compile(self.query.where) if where: result.append('WHERE %s' % where) return ' '.join(result), tuple(params) class SQLUpdateCompiler(SQLCompiler): def as_sql(self): """ Create the SQL for this query. Return the SQL string and list of parameters. """ self.pre_sql_setup() if not self.query.values: return '', () qn = self.quote_name_unless_alias values, update_params = [], [] for field, model, val in self.query.values: if hasattr(val, 'resolve_expression'): val = val.resolve_expression(self.query, allow_joins=False, for_save=True) if val.contains_aggregate: raise FieldError( 'Aggregate functions are not allowed in this query ' '(%s=%r).' % (field.name, val) ) if val.contains_over_clause: raise FieldError( 'Window expressions are not allowed in this query ' '(%s=%r).' % (field.name, val) ) elif hasattr(val, 'prepare_database_save'): if field.remote_field: val = field.get_db_prep_save( val.prepare_database_save(field), connection=self.connection, ) else: raise TypeError( "Tried to update field %s with a model instance, %r. " "Use a value compatible with %s." % (field, val, field.__class__.__name__) ) else: val = field.get_db_prep_save(val, connection=self.connection) # Getting the placeholder for the field. 
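            # Illustrative note (hypothetical backend): a geometry field
            # could return a placeholder such as 'SomeGeomFromText(%s)' so
            # the bound parameter passes through a database-side conversion
            # function instead of the plain '%s' used for ordinary fields.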
if hasattr(field, 'get_placeholder'): placeholder = field.get_placeholder(val, self, self.connection) else: placeholder = '%s' name = field.column if hasattr(val, 'as_sql'): sql, params = self.compile(val) values.append('%s = %s' % (qn(name), placeholder % sql)) update_params.extend(params) elif val is not None: values.append('%s = %s' % (qn(name), placeholder)) update_params.append(val) else: values.append('%s = NULL' % qn(name)) table = self.query.base_table result = [ 'UPDATE %s SET' % qn(table), ', '.join(values), ] where, params = self.compile(self.query.where) if where: result.append('WHERE %s' % where) return ' '.join(result), tuple(update_params + params) def execute_sql(self, result_type): """ Execute the specified update. Return the number of rows affected by the primary update query. The "primary update query" is the first non-empty query that is executed. Row counts for any subsequent, related queries are not available. """ cursor = super().execute_sql(result_type) try: rows = cursor.rowcount if cursor else 0 is_empty = cursor is None finally: if cursor: cursor.close() for query in self.query.get_related_updates(): aux_rows = query.get_compiler(self.using).execute_sql(result_type) if is_empty and aux_rows: rows = aux_rows is_empty = False return rows def pre_sql_setup(self): """ If the update depends on results from other tables, munge the "where" conditions to match the format required for (portable) SQL updates. If multiple updates are required, pull out the id values to update at this point so that they don't change as a result of the progressive updates. """ refcounts_before = self.query.alias_refcount.copy() # Ensure base table is in the query self.query.get_initial_alias() count = self.query.count_active_tables() if not self.query.related_updates and count == 1: return query = self.query.chain(klass=Query) query.select_related = False query.clear_ordering(True) query.extra = {} query.select = [] query.add_fields([query.get_meta().pk.name]) super().pre_sql_setup() must_pre_select = count > 1 and not self.connection.features.update_can_self_select # Now we adjust the current query: reset the where clause and get rid # of all the tables we don't need (since they're in the sub-select). self.query.where = self.query.where_class() if self.query.related_updates or must_pre_select: # Either we're using the idents in multiple update queries (so # don't want them to change), or the db backend doesn't support # selecting from the updating table (e.g. MySQL). idents = [] for rows in query.get_compiler(self.using).execute_sql(MULTI): idents.extend(r[0] for r in rows) self.query.add_filter(('pk__in', idents)) self.query.related_ids = idents else: # The fast path. Filters and updates in one query. self.query.add_filter(('pk__in', query)) self.query.reset_refcounts(refcounts_before) class SQLAggregateCompiler(SQLCompiler): def as_sql(self): """ Create the SQL for this query. Return the SQL string and list of parameters. """ sql, params = [], [] for annotation in self.query.annotation_select.values(): ann_sql, ann_params = self.compile(annotation, select_format=FORCE) sql.append(ann_sql) params.extend(ann_params) self.col_count = len(self.query.annotation_select) sql = ', '.join(sql) params = tuple(params) sql = 'SELECT %s FROM (%s) subquery' % (sql, self.query.subquery) params = params + self.query.sub_params return sql, params def cursor_iter(cursor, sentinel, col_count, itersize): """ Yield blocks of rows from a cursor and ensure the cursor is closed when done. 
""" try: for rows in iter((lambda: cursor.fetchmany(itersize)), sentinel): yield rows if col_count is None else [r[:col_count] for r in rows] finally: cursor.close()
f7b5569034371ddd9712d889c1f792e603f658b2bb16f6b333d4b75db4dd6474
from django.db.utils import ProgrammingError
from django.utils.functional import cached_property


class BaseDatabaseFeatures:
    gis_enabled = False
    allows_group_by_pk = False
    allows_group_by_selected_pks = False
    empty_fetchmany_value = []
    update_can_self_select = True

    # Does the backend distinguish between '' and None?
    interprets_empty_strings_as_nulls = False

    # Does the backend allow inserting duplicate NULL rows in a nullable
    # unique field? All core backends implement this correctly, but other
    # databases such as SQL Server do not.
    supports_nullable_unique_constraints = True

    # Does the backend allow inserting duplicate rows when a unique_together
    # constraint exists and some fields are nullable but not all of them?
    supports_partially_nullable_unique_constraints = True

    can_use_chunked_reads = True
    can_return_columns_from_insert = False
    can_return_rows_from_bulk_insert = False
    has_bulk_insert = True
    uses_savepoints = True
    can_release_savepoints = False

    # If True, don't use integer foreign keys referring to, e.g., positive
    # integer primary keys.
    related_fields_match_type = False
    allow_sliced_subqueries_with_in = True
    has_select_for_update = False
    has_select_for_update_nowait = False
    has_select_for_update_skip_locked = False
    has_select_for_update_of = False

    # Does the database's SELECT FOR UPDATE OF syntax require a column rather
    # than a table?
    select_for_update_of_column = False

    # Does the default test database allow multiple connections?
    # Usually an indication that the test database is in-memory.
    test_db_allows_multiple_connections = True

    # Can an object be saved without an explicit primary key?
    supports_unspecified_pk = False

    # Can a fixture contain forward references? i.e., are
    # FK constraints checked at the end of the transaction, or
    # at the end of each save operation?
    supports_forward_references = True

    # Does the backend truncate names properly when they are too long?
    truncates_names = False

    # Is there a REAL datatype in addition to floats/doubles?
    has_real_datatype = False
    supports_subqueries_in_group_by = True

    # Is there a true datatype for uuid?
    has_native_uuid_field = False

    # Is there a true datatype for timedeltas?
    has_native_duration_field = False

    # Does the database driver support same-type temporal data subtraction
    # by returning the type used to store a duration field?
    supports_temporal_subtraction = False

    # Does the __regex lookup support backreferencing and grouping?
    supports_regex_backreferencing = True

    # Can date/datetime lookups be performed using a string?
    supports_date_lookup_using_string = True

    # Can datetimes with timezones be used?
    supports_timezones = True

    # Does the database have a copy of the zoneinfo database?
    has_zoneinfo_database = True

    # When performing a GROUP BY, is an ORDER BY NULL required
    # to remove any ordering?
    requires_explicit_null_ordering_when_grouping = False

    # Does the backend order NULL values as largest or smallest?
    nulls_order_largest = False

    # The database's limit on the number of query parameters.
    max_query_params = None

    # Can an object have an autoincrement primary key of 0? MySQL says No.
    allows_auto_pk_0 = True

    # Do we need to NULL a ForeignKey out, or can the constraint check be
    # deferred?
    can_defer_constraint_checks = False

    # date_interval_sql can properly handle mixed Date/DateTime fields and
    # timedeltas.
    supports_mixed_date_datetime_comparisons = True

    # Does the backend support tablespaces? Default to False because it isn't
    # in the SQL standard.
    supports_tablespaces = False

    # Does the backend reset sequences between tests?
    supports_sequence_reset = True

    # Can the backend introspect the default value of a column?
    can_introspect_default = True

    # Confirm support for introspected foreign keys.
    # Every database can do this reliably, except MySQL,
    # which can't do it for MyISAM tables.
    can_introspect_foreign_keys = True

    # Can the backend introspect an AutoField, instead of an IntegerField?
    can_introspect_autofield = False

    # Can the backend introspect a BigIntegerField, instead of an IntegerField?
    can_introspect_big_integer_field = True

    # Can the backend introspect a BinaryField, instead of a TextField?
    can_introspect_binary_field = True

    # Can the backend introspect a DecimalField, instead of a FloatField?
    can_introspect_decimal_field = True

    # Can the backend introspect a DurationField, instead of a BigIntegerField?
    can_introspect_duration_field = True

    # Can the backend introspect an IPAddressField, instead of a CharField?
    can_introspect_ip_address_field = False

    # Can the backend introspect a PositiveIntegerField, instead of an IntegerField?
    can_introspect_positive_integer_field = False

    # Can the backend introspect a SmallIntegerField, instead of an IntegerField?
    can_introspect_small_integer_field = False

    # Can the backend introspect a TimeField, instead of a DateTimeField?
    can_introspect_time_field = True

    # Some backends may not be able to differentiate BigAutoField from other
    # fields such as AutoField.
    introspected_big_auto_field_type = 'BigAutoField'

    # Some backends may not be able to differentiate BooleanField from other
    # fields such as IntegerField.
    introspected_boolean_field_type = 'BooleanField'

    # Can the backend introspect the column order (ASC/DESC) for indexes?
    supports_index_column_ordering = True

    # Does the backend support introspection of materialized views?
    can_introspect_materialized_views = False

    # Support for the DISTINCT ON clause.
    can_distinct_on_fields = False

    # Does the backend prevent running SQL queries in broken transactions?
    atomic_transactions = True

    # Can we roll back DDL in a transaction?
    can_rollback_ddl = False

    # Does it support operations that require renaming references in a
    # transaction?
    supports_atomic_references_rename = True

    # Can we issue more than one ALTER COLUMN clause in an ALTER TABLE?
    supports_combined_alters = False

    # Does it support foreign keys?
    supports_foreign_keys = True

    # Can it create foreign key constraints inline when adding columns?
    can_create_inline_fk = True

    # Does it support CHECK constraints?
    supports_column_check_constraints = True
    supports_table_check_constraints = True

    # Does the backend support introspection of CHECK constraints?
    can_introspect_check_constraints = True

    # Does the backend support 'pyformat' style ("... %(name)s ...", {'name': value})
    # parameter passing? Note this can be provided by the backend even if not
    # supported by the Python driver.
    supports_paramstyle_pyformat = True

    # Does the backend require literal defaults, rather than parameterized ones?
    requires_literal_defaults = False

    # Does the backend require a connection reset after each material schema change?
    connection_persists_old_columns = False

    # What kind of error does the backend throw when accessing a closed cursor?
    closed_cursor_error_class = ProgrammingError

    # Does 'a' LIKE 'A' match?
    has_case_insensitive_like = True

    # Suffix for backends that don't support "SELECT xxx;" queries.
    bare_select_suffix = ''

    # Is NULL implied on columns without needing to be explicitly specified?
    implied_column_null = False

    # Does the backend support "select for update" queries with limit (and offset)?
    supports_select_for_update_with_limit = True

    # Does the backend ignore null expressions in GREATEST and LEAST queries unless
    # every expression is null?
    greatest_least_ignores_nulls = False

    # Can the backend clone databases for parallel test execution?
    # Defaults to False to allow third-party backends to opt in.
    can_clone_databases = False

    # Does the backend consider table names with different casing to
    # be equal?
    ignores_table_name_case = False

    # Place FOR UPDATE right after the FROM clause. Used on MSSQL.
    for_update_after_from = False

    # Combinatorial flags.
    supports_select_union = True
    supports_select_intersection = True
    supports_select_difference = True
    supports_slicing_ordering_in_compound = False
    supports_parentheses_in_compound = True

    # Does the database support SQL 2003 FILTER (WHERE ...) in aggregate
    # expressions?
    supports_aggregate_filter_clause = False

    # Does the backend support indexing a TextField?
    supports_index_on_text_field = True

    # Does the backend support window expressions (expression OVER (...))?
    supports_over_clause = False
    supports_frame_range_fixed_distance = False

    # Does the backend support CAST with precision?
    supports_cast_with_precision = True

    # How many fractional-second digits does the database return when casting
    # a value to a type with time?
    time_cast_precision = 6

    # SQL to create a procedure for use by the Django test suite. The
    # functionality of the procedure isn't important.
    create_test_procedure_without_params_sql = None
    create_test_procedure_with_int_param_sql = None

    # Does the backend support keyword parameters for cursor.callproc()?
    supports_callproc_kwargs = False

    # Convert CharField results from bytes to str in database functions.
    db_functions_convert_bytes_to_str = False

    # What formats does the backend EXPLAIN syntax support?
    supported_explain_formats = set()

    # Does DatabaseOperations.explain_query_prefix() raise ValueError if
    # unknown kwargs are passed to QuerySet.explain()?
    validates_explain_options = True

    # Does the backend support the default parameter in lead() and lag()?
    supports_default_in_lead_lag = True

    # Does the backend support ignoring constraint or uniqueness errors during
    # INSERT?
    supports_ignore_conflicts = True

    # Does this backend require casting the results of CASE expressions used
    # in UPDATE statements to ensure the expression has the correct type?
    requires_casted_case_in_updates = False

    # Does the backend support partial indexes (CREATE INDEX ... WHERE ...)?
    supports_partial_indexes = True
    supports_functions_in_partial_indexes = True

    # Does the database allow more than one constraint or index on the same
    # field(s)?
allows_multiple_constraints_on_same_fields = True def __init__(self, connection): self.connection = connection @cached_property def supports_explaining_query_execution(self): """Does this backend support explaining query execution?""" return self.connection.ops.explain_prefix is not None @cached_property def supports_transactions(self): """Confirm support for transactions.""" with self.connection.cursor() as cursor: cursor.execute('CREATE TABLE ROLLBACK_TEST (X INT)') self.connection.set_autocommit(False) cursor.execute('INSERT INTO ROLLBACK_TEST (X) VALUES (8)') self.connection.rollback() self.connection.set_autocommit(True) cursor.execute('SELECT COUNT(X) FROM ROLLBACK_TEST') count, = cursor.fetchone() cursor.execute('DROP TABLE ROLLBACK_TEST') return count == 0
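

# Illustrative sketch, not part of Django: a backend advertises its
# capabilities by subclassing BaseDatabaseFeatures and overriding only the
# flags whose defaults above don't match the database. Everything named here
# (the class and the connection's `server_version` attribute) is hypothetical.
class HypotheticalDatabaseFeatures(BaseDatabaseFeatures):
    # This imaginary database can't store timezone-aware datetimes and caps
    # the number of parameters per query.
    supports_timezones = False
    max_query_params = 999

    @cached_property
    def supports_over_clause(self):
        # Version-dependent capabilities are computed lazily from the live
        # connection, mirroring supports_transactions above.
        return self.connection.server_version >= (3, 25)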