# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j E Y г.'
TIME_FORMAT = 'G:i'
DATETIME_FORMAT = 'j E Y г. G:i'
YEAR_MONTH_FORMAT = 'F Y г.'
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'd.m.Y'
SHORT_DATETIME_FORMAT = 'd.m.Y H:i'
FIRST_DAY_OF_WEEK = 1  # Monday

# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see https://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
    '%d.%m.%Y',  # '25.10.2006'
    '%d.%m.%y',  # '25.10.06'
]
DATETIME_INPUT_FORMATS = [
    '%d.%m.%Y %H:%M:%S',     # '25.10.2006 14:30:59'
    '%d.%m.%Y %H:%M:%S.%f',  # '25.10.2006 14:30:59.000200'
    '%d.%m.%Y %H:%M',        # '25.10.2006 14:30'
    '%d.%m.%y %H:%M:%S',     # '25.10.06 14:30:59'
    '%d.%m.%y %H:%M:%S.%f',  # '25.10.06 14:30:59.000200'
    '%d.%m.%y %H:%M',        # '25.10.06 14:30'
]
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '\xa0'  # non-breaking space
NUMBER_GROUPING = 3
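
# ---------------------------------------------------------------------------
# Illustrative sketch (added; not part of the file above). The *_INPUT_FORMATS
# entries are plain strftime/strptime patterns, so a value in the documented
# shape can be parsed with the standard library directly:
from datetime import datetime

parsed = datetime.strptime('25.10.2006 14:30:59', '%d.%m.%Y %H:%M:%S')
assert (parsed.year, parsed.hour) == (2006, 14)
# ---------------------------------------------------------------------------
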
""" The main QuerySet implementation. This provides the public API for the ORM. """ import copy import operator import warnings from collections import namedtuple from functools import lru_cache from itertools import chain from django.conf import settings from django.core import exceptions from django.db import ( DJANGO_VERSION_PICKLE_KEY, IntegrityError, connections, router, transaction, ) from django.db.models import DateField, DateTimeField, sql from django.db.models.constants import LOOKUP_SEP from django.db.models.deletion import Collector from django.db.models.expressions import Case, Expression, F, Value, When from django.db.models.fields import AutoField from django.db.models.functions import Cast, Trunc from django.db.models.query_utils import FilteredRelation, Q from django.db.models.sql.constants import CURSOR, GET_ITERATOR_CHUNK_SIZE from django.db.models.utils import resolve_callables from django.db.utils import NotSupportedError from django.utils import timezone from django.utils.functional import cached_property, partition from django.utils.version import get_version # The maximum number of results to fetch in a get() query. MAX_GET_RESULTS = 21 # The maximum number of items to display in a QuerySet.__repr__ REPR_OUTPUT_SIZE = 20 class BaseIterable: def __init__(self, queryset, chunked_fetch=False, chunk_size=GET_ITERATOR_CHUNK_SIZE): self.queryset = queryset self.chunked_fetch = chunked_fetch self.chunk_size = chunk_size class ModelIterable(BaseIterable): """Iterable that yields a model instance for each row.""" def __iter__(self): queryset = self.queryset db = queryset.db compiler = queryset.query.get_compiler(using=db) # Execute the query. This will also fill compiler.select, klass_info, # and annotations. results = compiler.execute_sql(chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size) select, klass_info, annotation_col_map = (compiler.select, compiler.klass_info, compiler.annotation_col_map) model_cls = klass_info['model'] select_fields = klass_info['select_fields'] model_fields_start, model_fields_end = select_fields[0], select_fields[-1] + 1 init_list = [f[0].target.attname for f in select[model_fields_start:model_fields_end]] related_populators = get_related_populators(klass_info, select, db) known_related_objects = [ (field, related_objs, operator.attrgetter(*[ field.attname if from_field == 'self' else queryset.model._meta.get_field(from_field).attname for from_field in field.from_fields ])) for field, related_objs in queryset._known_related_objects.items() ] for row in compiler.results_iter(results): obj = model_cls.from_db(db, init_list, row[model_fields_start:model_fields_end]) for rel_populator in related_populators: rel_populator.populate(row, obj) if annotation_col_map: for attr_name, col_pos in annotation_col_map.items(): setattr(obj, attr_name, row[col_pos]) # Add the known related objects to the model. for field, rel_objs, rel_getter in known_related_objects: # Avoid overwriting objects loaded by, e.g., select_related(). if field.is_cached(obj): continue rel_obj_id = rel_getter(obj) try: rel_obj = rel_objs[rel_obj_id] except KeyError: pass # May happen in qs1 | qs2 scenarios. else: setattr(obj, field.name, rel_obj) yield obj class ValuesIterable(BaseIterable): """ Iterable returned by QuerySet.values() that yields a dict for each row. """ def __iter__(self): queryset = self.queryset query = queryset.query compiler = query.get_compiler(queryset.db) # extra(select=...) cols are always at the start of the row. 
        names = [
            *query.extra_select,
            *query.values_select,
            *query.annotation_select,
        ]
        indexes = range(len(names))
        for row in compiler.results_iter(chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size):
            yield {names[i]: row[i] for i in indexes}


class ValuesListIterable(BaseIterable):
    """
    Iterable returned by QuerySet.values_list(flat=False) that yields a tuple
    for each row.
    """

    def __iter__(self):
        queryset = self.queryset
        query = queryset.query
        compiler = query.get_compiler(queryset.db)

        if queryset._fields:
            # extra(select=...) cols are always at the start of the row.
            names = [
                *query.extra_select,
                *query.values_select,
                *query.annotation_select,
            ]
            fields = [*queryset._fields, *(f for f in query.annotation_select if f not in queryset._fields)]
            if fields != names:
                # Reorder according to fields.
                index_map = {name: idx for idx, name in enumerate(names)}
                rowfactory = operator.itemgetter(*[index_map[f] for f in fields])
                return map(
                    rowfactory,
                    compiler.results_iter(chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size)
                )
        return compiler.results_iter(tuple_expected=True, chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size)


class NamedValuesListIterable(ValuesListIterable):
    """
    Iterable returned by QuerySet.values_list(named=True) that yields a
    namedtuple for each row.
    """

    @staticmethod
    @lru_cache()
    def create_namedtuple_class(*names):
        # Cache namedtuple() with @lru_cache() since it's too slow to be
        # called for every QuerySet evaluation.
        return namedtuple('Row', names)

    def __iter__(self):
        queryset = self.queryset
        if queryset._fields:
            names = queryset._fields
        else:
            query = queryset.query
            names = [*query.extra_select, *query.values_select, *query.annotation_select]
        tuple_class = self.create_namedtuple_class(*names)
        new = tuple.__new__
        for row in super().__iter__():
            yield new(tuple_class, row)


class FlatValuesListIterable(BaseIterable):
    """
    Iterable returned by QuerySet.values_list(flat=True) that yields single
    values.
    """

    def __iter__(self):
        queryset = self.queryset
        compiler = queryset.query.get_compiler(queryset.db)
        for row in compiler.results_iter(chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size):
            yield row[0]


class QuerySet:
    """Represent a lazy database lookup for a set of objects."""

    def __init__(self, model=None, query=None, using=None, hints=None):
        self.model = model
        self._db = using
        self._hints = hints or {}
        self._query = query or sql.Query(self.model)
        self._result_cache = None
        self._sticky_filter = False
        self._for_write = False
        self._prefetch_related_lookups = ()
        self._prefetch_done = False
        self._known_related_objects = {}  # {rel_field: {pk: rel_obj}}
        self._iterable_class = ModelIterable
        self._fields = None
        self._defer_next_filter = False
        self._deferred_filter = None

    @property
    def query(self):
        if self._deferred_filter:
            negate, args, kwargs = self._deferred_filter
            self._filter_or_exclude_inplace(negate, *args, **kwargs)
            self._deferred_filter = None
        return self._query

    @query.setter
    def query(self, value):
        self._query = value

    def as_manager(cls):
        # Address the circular dependency between `QuerySet` and `Manager`.
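        # Illustrative note (added): typical usage is declaring, on a model,
        #     objects = EntryQuerySet.as_manager()
        # so custom QuerySet methods become manager methods (`EntryQuerySet`
        # is hypothetical).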
        from django.db.models.manager import Manager
        manager = Manager.from_queryset(cls)()
        manager._built_with_as_manager = True
        return manager
    as_manager.queryset_only = True
    as_manager = classmethod(as_manager)

    ########################
    # PYTHON MAGIC METHODS #
    ########################

    def __deepcopy__(self, memo):
        """Don't populate the QuerySet's cache."""
        obj = self.__class__()
        for k, v in self.__dict__.items():
            if k == '_result_cache':
                obj.__dict__[k] = None
            else:
                obj.__dict__[k] = copy.deepcopy(v, memo)
        return obj

    def __getstate__(self):
        # Force the cache to be fully populated.
        self._fetch_all()
        return {**self.__dict__, DJANGO_VERSION_PICKLE_KEY: get_version()}

    def __setstate__(self, state):
        msg = None
        pickled_version = state.get(DJANGO_VERSION_PICKLE_KEY)
        if pickled_version:
            current_version = get_version()
            if current_version != pickled_version:
                msg = (
                    "Pickled queryset instance's Django version %s does not "
                    "match the current version %s." % (pickled_version, current_version)
                )
        else:
            msg = "Pickled queryset instance's Django version is not specified."

        if msg:
            warnings.warn(msg, RuntimeWarning, stacklevel=2)

        self.__dict__.update(state)

    def __repr__(self):
        data = list(self[:REPR_OUTPUT_SIZE + 1])
        if len(data) > REPR_OUTPUT_SIZE:
            data[-1] = "...(remaining elements truncated)..."
        return '<%s %r>' % (self.__class__.__name__, data)

    def __len__(self):
        self._fetch_all()
        return len(self._result_cache)

    def __iter__(self):
        """
        The queryset iterator protocol uses three nested iterators in the
        default case:
            1. sql.compiler.execute_sql()
               - Returns 100 rows at a time (constants.GET_ITERATOR_CHUNK_SIZE)
                 using cursor.fetchmany(). This part is responsible for
                 doing some column masking, and returning the rows in chunks.
            2. sql.compiler.results_iter()
               - Returns one row at a time. At this point the rows are still
                 just tuples. In some cases the return values are converted to
                 Python values at this location.
            3. self.iterator()
               - Responsible for turning the rows into model objects.
        """
        self._fetch_all()
        return iter(self._result_cache)

    def __bool__(self):
        self._fetch_all()
        return bool(self._result_cache)

    def __getitem__(self, k):
        """Retrieve an item or slice from the set of results."""
        if not isinstance(k, (int, slice)):
            raise TypeError(
                'QuerySet indices must be integers or slices, not %s.'
                % type(k).__name__
            )
        assert ((not isinstance(k, slice) and (k >= 0)) or
                (isinstance(k, slice) and (k.start is None or k.start >= 0) and
                 (k.stop is None or k.stop >= 0))), \
            "Negative indexing is not supported."
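        # Illustrative note (added): slicing sets LIMIT/OFFSET without
        # evaluating the queryset, while integer indexing fetches one row
        # immediately, e.g.
        #     Entry.objects.all()[5:10]   # lazy; SELECT ... LIMIT 5 OFFSET 5
        #     Entry.objects.all()[0]      # evaluated at once
        # (`Entry` is a hypothetical model.)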
        if self._result_cache is not None:
            return self._result_cache[k]

        if isinstance(k, slice):
            qs = self._chain()
            if k.start is not None:
                start = int(k.start)
            else:
                start = None
            if k.stop is not None:
                stop = int(k.stop)
            else:
                stop = None
            qs.query.set_limits(start, stop)
            return list(qs)[::k.step] if k.step else qs

        qs = self._chain()
        qs.query.set_limits(k, k + 1)
        qs._fetch_all()
        return qs._result_cache[0]

    def __and__(self, other):
        self._merge_sanity_check(other)
        if isinstance(other, EmptyQuerySet):
            return other
        if isinstance(self, EmptyQuerySet):
            return self
        combined = self._chain()
        combined._merge_known_related_objects(other)
        combined.query.combine(other.query, sql.AND)
        return combined

    def __or__(self, other):
        self._merge_sanity_check(other)
        if isinstance(self, EmptyQuerySet):
            return other
        if isinstance(other, EmptyQuerySet):
            return self
        query = self if self.query.can_filter() else self.model._base_manager.filter(pk__in=self.values('pk'))
        combined = query._chain()
        combined._merge_known_related_objects(other)
        if not other.query.can_filter():
            other = other.model._base_manager.filter(pk__in=other.values('pk'))
        combined.query.combine(other.query, sql.OR)
        return combined

    ####################################
    # METHODS THAT DO DATABASE QUERIES #
    ####################################

    def _iterator(self, use_chunked_fetch, chunk_size):
        yield from self._iterable_class(self, chunked_fetch=use_chunked_fetch, chunk_size=chunk_size)

    def iterator(self, chunk_size=2000):
        """
        An iterator over the results from applying this QuerySet to the
        database.
        """
        if chunk_size <= 0:
            raise ValueError('Chunk size must be strictly positive.')
        use_chunked_fetch = not connections[self.db].settings_dict.get('DISABLE_SERVER_SIDE_CURSORS')
        return self._iterator(use_chunked_fetch, chunk_size)

    def aggregate(self, *args, **kwargs):
        """
        Return a dictionary containing the calculations (aggregation)
        over the current queryset.

        If args is present the expression is passed as a kwarg using
        the Aggregate object's default alias.
        """
        if self.query.distinct_fields:
            raise NotImplementedError("aggregate() + distinct(fields) not implemented.")
        self._validate_values_are_expressions((*args, *kwargs.values()), method_name='aggregate')
        for arg in args:
            # The default_alias property raises TypeError if default_alias
            # can't be set automatically or AttributeError if it isn't an
            # attribute.
            try:
                arg.default_alias
            except (AttributeError, TypeError):
                raise TypeError("Complex aggregates require an alias")
            kwargs[arg.default_alias] = arg

        query = self.query.chain()
        for (alias, aggregate_expr) in kwargs.items():
            query.add_annotation(aggregate_expr, alias, is_summary=True)
            if not query.annotations[alias].contains_aggregate:
                raise TypeError("%s is not an aggregate expression" % alias)
        return query.get_aggregation(self.db, kwargs)

    def count(self):
        """
        Perform a SELECT COUNT() and return the number of records as an
        integer.

        If the QuerySet is already fully cached, return the length of the
        cached results set to avoid multiple SELECT COUNT(*) calls.
        """
        if self._result_cache is not None:
            return len(self._result_cache)

        return self.query.get_count(using=self.db)

    def get(self, *args, **kwargs):
        """
        Perform the query and return a single object matching the given
        keyword arguments.
""" clone = self._chain() if self.query.combinator else self.filter(*args, **kwargs) if self.query.can_filter() and not self.query.distinct_fields: clone = clone.order_by() limit = None if not clone.query.select_for_update or connections[clone.db].features.supports_select_for_update_with_limit: limit = MAX_GET_RESULTS clone.query.set_limits(high=limit) num = len(clone) if num == 1: return clone._result_cache[0] if not num: raise self.model.DoesNotExist( "%s matching query does not exist." % self.model._meta.object_name ) raise self.model.MultipleObjectsReturned( 'get() returned more than one %s -- it returned %s!' % ( self.model._meta.object_name, num if not limit or num < limit else 'more than %s' % (limit - 1), ) ) def create(self, **kwargs): """ Create a new object with the given kwargs, saving it to the database and returning the created object. """ obj = self.model(**kwargs) self._for_write = True obj.save(force_insert=True, using=self.db) return obj def _populate_pk_values(self, objs): for obj in objs: if obj.pk is None: obj.pk = obj._meta.pk.get_pk_value_on_save(obj) def bulk_create(self, objs, batch_size=None, ignore_conflicts=False): """ Insert each of the instances into the database. Do *not* call save() on each of the instances, do not send any pre/post_save signals, and do not set the primary key attribute if it is an autoincrement field (except if features.can_return_rows_from_bulk_insert=True). Multi-table models are not supported. """ # When you bulk insert you don't get the primary keys back (if it's an # autoincrement, except if can_return_rows_from_bulk_insert=True), so # you can't insert into the child tables which references this. There # are two workarounds: # 1) This could be implemented if you didn't have an autoincrement pk # 2) You could do it by doing O(n) normal inserts into the parent # tables to get the primary keys back and then doing a single bulk # insert into the childmost table. # We currently set the primary keys on the objects when using # PostgreSQL via the RETURNING ID clause. It should be possible for # Oracle as well, but the semantics for extracting the primary keys is # trickier so it's not done yet. assert batch_size is None or batch_size > 0 # Check that the parents share the same concrete model with the our # model to detect the inheritance pattern ConcreteGrandParent -> # MultiTableParent -> ProxyChild. Simply checking self.model._meta.proxy # would not identify that case as involving multiple tables. 
        for parent in self.model._meta.get_parent_list():
            if parent._meta.concrete_model is not self.model._meta.concrete_model:
                raise ValueError("Can't bulk create a multi-table inherited model")
        if not objs:
            return objs
        self._for_write = True
        connection = connections[self.db]
        opts = self.model._meta
        fields = opts.concrete_fields
        objs = list(objs)
        self._populate_pk_values(objs)
        with transaction.atomic(using=self.db, savepoint=False):
            objs_with_pk, objs_without_pk = partition(lambda o: o.pk is None, objs)
            if objs_with_pk:
                returned_columns = self._batched_insert(
                    objs_with_pk, fields, batch_size, ignore_conflicts=ignore_conflicts,
                )
                for obj_with_pk, results in zip(objs_with_pk, returned_columns):
                    for result, field in zip(results, opts.db_returning_fields):
                        if field != opts.pk:
                            setattr(obj_with_pk, field.attname, result)
                for obj_with_pk in objs_with_pk:
                    obj_with_pk._state.adding = False
                    obj_with_pk._state.db = self.db
            if objs_without_pk:
                fields = [f for f in fields if not isinstance(f, AutoField)]
                returned_columns = self._batched_insert(
                    objs_without_pk, fields, batch_size, ignore_conflicts=ignore_conflicts,
                )
                if connection.features.can_return_rows_from_bulk_insert and not ignore_conflicts:
                    assert len(returned_columns) == len(objs_without_pk)
                for obj_without_pk, results in zip(objs_without_pk, returned_columns):
                    for result, field in zip(results, opts.db_returning_fields):
                        setattr(obj_without_pk, field.attname, result)
                    obj_without_pk._state.adding = False
                    obj_without_pk._state.db = self.db

        return objs

    def bulk_update(self, objs, fields, batch_size=None):
        """
        Update the given fields in each of the given objects in the database.
        """
        if batch_size is not None and batch_size < 0:
            raise ValueError('Batch size must be a positive integer.')
        if not fields:
            raise ValueError('Field names must be given to bulk_update().')
        objs = tuple(objs)
        if any(obj.pk is None for obj in objs):
            raise ValueError('All bulk_update() objects must have a primary key set.')
        fields = [self.model._meta.get_field(name) for name in fields]
        if any(not f.concrete or f.many_to_many for f in fields):
            raise ValueError('bulk_update() can only be used with concrete fields.')
        if any(f.primary_key for f in fields):
            raise ValueError('bulk_update() cannot be used with primary key fields.')
        if not objs:
            return
        # PK is used twice in the resulting update query, once in the filter
        # and once in the WHEN. Each field will also have one CAST.
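        # Illustrative note (added): conceptually, each batch below becomes a
        # single statement of the form
        #     UPDATE ... SET field = CASE WHEN pk=1 THEN ... WHEN pk=2 THEN ...
        #     END WHERE pk IN (...)
        # built from the Case/When expressions assembled next.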
        max_batch_size = connections[self.db].ops.bulk_batch_size(['pk', 'pk'] + fields, objs)
        batch_size = min(batch_size, max_batch_size) if batch_size else max_batch_size
        requires_casting = connections[self.db].features.requires_casted_case_in_updates
        batches = (objs[i:i + batch_size] for i in range(0, len(objs), batch_size))
        updates = []
        for batch_objs in batches:
            update_kwargs = {}
            for field in fields:
                when_statements = []
                for obj in batch_objs:
                    attr = getattr(obj, field.attname)
                    if not isinstance(attr, Expression):
                        attr = Value(attr, output_field=field)
                    when_statements.append(When(pk=obj.pk, then=attr))
                case_statement = Case(*when_statements, output_field=field)
                if requires_casting:
                    case_statement = Cast(case_statement, output_field=field)
                update_kwargs[field.attname] = case_statement
            updates.append(([obj.pk for obj in batch_objs], update_kwargs))
        with transaction.atomic(using=self.db, savepoint=False):
            for pks, update_kwargs in updates:
                self.filter(pk__in=pks).update(**update_kwargs)
    bulk_update.alters_data = True

    def get_or_create(self, defaults=None, **kwargs):
        """
        Look up an object with the given kwargs, creating one if necessary.
        Return a tuple of (object, created), where created is a boolean
        specifying whether an object was created.
        """
        # The get() needs to be targeted at the write database in order
        # to avoid potential transaction consistency problems.
        self._for_write = True
        try:
            return self.get(**kwargs), False
        except self.model.DoesNotExist:
            params = self._extract_model_params(defaults, **kwargs)
            return self._create_object_from_params(kwargs, params)

    def update_or_create(self, defaults=None, **kwargs):
        """
        Look up an object with the given kwargs, updating one with defaults
        if it exists, otherwise create a new one.
        Return a tuple (object, created), where created is a boolean
        specifying whether an object was created.
        """
        defaults = defaults or {}
        self._for_write = True
        with transaction.atomic(using=self.db):
            try:
                obj = self.select_for_update().get(**kwargs)
            except self.model.DoesNotExist:
                params = self._extract_model_params(defaults, **kwargs)
                # Lock the row so that a concurrent update is blocked until
                # after update_or_create() has performed its save.
                obj, created = self._create_object_from_params(kwargs, params, lock=True)
                if created:
                    return obj, created
            for k, v in resolve_callables(defaults):
                setattr(obj, k, v)
            obj.save(using=self.db)
        return obj, False

    def _create_object_from_params(self, lookup, params, lock=False):
        """
        Try to create an object using passed params. Used by get_or_create()
        and update_or_create().
        """
        try:
            with transaction.atomic(using=self.db):
                params = dict(resolve_callables(params))
                obj = self.create(**params)
            return obj, True
        except IntegrityError as e:
            try:
                qs = self.select_for_update() if lock else self
                return qs.get(**lookup), False
            except self.model.DoesNotExist:
                pass
            raise e

    def _extract_model_params(self, defaults, **kwargs):
        """
        Prepare `params` for creating a model instance based on the given
        kwargs; for use by get_or_create() and update_or_create().
        """
        defaults = defaults or {}
        params = {k: v for k, v in kwargs.items() if LOOKUP_SEP not in k}
        params.update(defaults)
        property_names = self.model._meta._property_names
        invalid_params = []
        for param in params:
            try:
                self.model._meta.get_field(param)
            except exceptions.FieldDoesNotExist:
                # It's okay to use a model's property if it has a setter.
                if not (param in property_names and getattr(self.model, param).fset):
                    invalid_params.append(param)
        if invalid_params:
            raise exceptions.FieldError(
                "Invalid field name(s) for model %s: '%s'." % (
                    self.model._meta.object_name,
                    "', '".join(sorted(invalid_params)),
                ))
        return params

    def _earliest(self, *fields):
        """
        Return the earliest object according to fields (if given) or by the
        model's Meta.get_latest_by.
        """
        if fields:
            order_by = fields
        else:
            order_by = getattr(self.model._meta, 'get_latest_by')
            if order_by and not isinstance(order_by, (tuple, list)):
                order_by = (order_by,)
        if order_by is None:
            raise ValueError(
                "earliest() and latest() require either fields as positional "
                "arguments or 'get_latest_by' in the model's Meta."
            )

        assert not self.query.is_sliced, \
            "Cannot change a query once a slice has been taken."
        obj = self._chain()
        obj.query.set_limits(high=1)
        obj.query.clear_ordering(force_empty=True)
        obj.query.add_ordering(*order_by)
        return obj.get()

    def earliest(self, *fields):
        return self._earliest(*fields)

    def latest(self, *fields):
        return self.reverse()._earliest(*fields)

    def first(self):
        """Return the first object of a query or None if no match is found."""
        for obj in (self if self.ordered else self.order_by('pk'))[:1]:
            return obj

    def last(self):
        """Return the last object of a query or None if no match is found."""
        for obj in (self.reverse() if self.ordered else self.order_by('-pk'))[:1]:
            return obj

    def in_bulk(self, id_list=None, *, field_name='pk'):
        """
        Return a dictionary mapping each of the given IDs to the object with
        that ID. If `id_list` isn't provided, evaluate the entire QuerySet.
        """
        assert not self.query.is_sliced, \
            "Cannot use 'limit' or 'offset' with in_bulk"
        if field_name != 'pk' and not self.model._meta.get_field(field_name).unique:
            raise ValueError("in_bulk()'s field_name must be a unique field but %r isn't." % field_name)
        if id_list is not None:
            if not id_list:
                return {}
            filter_key = '{}__in'.format(field_name)
            batch_size = connections[self.db].features.max_query_params
            id_list = tuple(id_list)
            # If the database has a limit on the number of query parameters
            # (e.g. SQLite), retrieve objects in batches if necessary.
            if batch_size and batch_size < len(id_list):
                qs = ()
                for offset in range(0, len(id_list), batch_size):
                    batch = id_list[offset:offset + batch_size]
                    qs += tuple(self.filter(**{filter_key: batch}).order_by())
            else:
                qs = self.filter(**{filter_key: id_list}).order_by()
        else:
            qs = self._chain()
        return {getattr(obj, field_name): obj for obj in qs}

    def delete(self):
        """Delete the records in the current QuerySet."""
        self._not_support_combined_queries('delete')
        assert not self.query.is_sliced, \
            "Cannot use 'limit' or 'offset' with delete."

        if self._fields is not None:
            raise TypeError("Cannot call delete() after .values() or .values_list()")

        del_query = self._chain()

        # The delete is actually 2 queries - one to find related objects,
        # and one to delete. Make sure that the discovery of related
        # objects is performed on the same database as the deletion.
        del_query._for_write = True

        # Disable non-supported fields.
        del_query.query.select_for_update = False
        del_query.query.select_related = False
        del_query.query.clear_ordering(force_empty=True)

        collector = Collector(using=del_query.db)
        collector.collect(del_query)
        deleted, _rows_count = collector.delete()

        # Clear the result cache, in case this QuerySet gets reused.
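        # Illustrative note (added): the two-tuple returned below looks like
        #     (4, {'myapp.Entry': 3, 'myapp.Comment': 1})
        # i.e. the total number of rows deleted plus a per-model breakdown
        # ('myapp', 'Entry', and 'Comment' are hypothetical).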
        self._result_cache = None
        return deleted, _rows_count

    delete.alters_data = True
    delete.queryset_only = True

    def _raw_delete(self, using):
        """
        Delete objects found from the given queryset in a single direct SQL
        query. No signals are sent and there is no protection for cascades.
        """
        query = self.query.clone()
        query.__class__ = sql.DeleteQuery
        cursor = query.get_compiler(using).execute_sql(CURSOR)
        return cursor.rowcount if cursor else 0
    _raw_delete.alters_data = True

    def update(self, **kwargs):
        """
        Update all elements in the current QuerySet, setting all the given
        fields to the appropriate values.
        """
        self._not_support_combined_queries('update')
        assert not self.query.is_sliced, \
            "Cannot update a query once a slice has been taken."
        self._for_write = True
        query = self.query.chain(sql.UpdateQuery)
        query.add_update_values(kwargs)
        # Clear any annotations so that they won't be present in subqueries.
        query.annotations = {}
        with transaction.mark_for_rollback_on_error(using=self.db):
            rows = query.get_compiler(self.db).execute_sql(CURSOR)
        self._result_cache = None
        return rows
    update.alters_data = True

    def _update(self, values):
        """
        A version of update() that accepts field objects instead of field
        names. Used primarily for model saving and not intended for use by
        general code (it requires too much poking around at model internals
        to be useful at that level).
        """
        assert not self.query.is_sliced, \
            "Cannot update a query once a slice has been taken."
        query = self.query.chain(sql.UpdateQuery)
        query.add_update_fields(values)
        # Clear any annotations so that they won't be present in subqueries.
        query.annotations = {}
        self._result_cache = None
        return query.get_compiler(self.db).execute_sql(CURSOR)
    _update.alters_data = True
    _update.queryset_only = False

    def exists(self):
        if self._result_cache is None:
            return self.query.has_results(using=self.db)
        return bool(self._result_cache)

    def _prefetch_related_objects(self):
        # This method can only be called once the result cache has been filled.
        prefetch_related_objects(self._result_cache, *self._prefetch_related_lookups)
        self._prefetch_done = True

    def explain(self, *, format=None, **options):
        return self.query.explain(using=self.db, format=format, **options)

    ##################################################
    # PUBLIC METHODS THAT RETURN A QUERYSET SUBCLASS #
    ##################################################

    def raw(self, raw_query, params=None, translations=None, using=None):
        if using is None:
            using = self.db
        qs = RawQuerySet(raw_query, model=self.model, params=params, translations=translations, using=using)
        qs._prefetch_related_lookups = self._prefetch_related_lookups[:]
        return qs

    def _values(self, *fields, **expressions):
        clone = self._chain()
        if expressions:
            clone = clone.annotate(**expressions)
        clone._fields = fields
        clone.query.set_values(fields)
        return clone

    def values(self, *fields, **expressions):
        fields += tuple(expressions)
        clone = self._values(*fields, **expressions)
        clone._iterable_class = ValuesIterable
        return clone

    def values_list(self, *fields, flat=False, named=False):
        if flat and named:
            raise TypeError("'flat' and 'named' can't be used together.")
        if flat and len(fields) > 1:
            raise TypeError("'flat' is not valid when values_list is called with more than one field.")

        field_names = {f for f in fields if not hasattr(f, 'resolve_expression')}
        _fields = []
        expressions = {}
        counter = 1
        for field in fields:
            if hasattr(field, 'resolve_expression'):
                field_id_prefix = getattr(field, 'default_alias', field.__class__.__name__.lower())
                while True:
                    field_id = field_id_prefix + str(counter)
                    counter += 1
                    if field_id not in field_names:
                        break
                expressions[field_id] = field
                _fields.append(field_id)
            else:
                _fields.append(field)

        clone = self._values(*_fields, **expressions)
        clone._iterable_class = (
            NamedValuesListIterable if named
            else FlatValuesListIterable if flat
            else ValuesListIterable
        )
        return clone

    def dates(self, field_name, kind, order='ASC'):
        """
        Return a list of date objects representing all available dates for
        the given field_name, scoped to 'kind'.
        """
        assert kind in ('year', 'month', 'week', 'day'), \
            "'kind' must be one of 'year', 'month', 'week', or 'day'."
        assert order in ('ASC', 'DESC'), \
            "'order' must be either 'ASC' or 'DESC'."
        return self.annotate(
            datefield=Trunc(field_name, kind, output_field=DateField()),
            plain_field=F(field_name)
        ).values_list(
            'datefield', flat=True
        ).distinct().filter(plain_field__isnull=False).order_by(('-' if order == 'DESC' else '') + 'datefield')

    def datetimes(self, field_name, kind, order='ASC', tzinfo=None):
        """
        Return a list of datetime objects representing all available
        datetimes for the given field_name, scoped to 'kind'.
        """
        assert kind in ('year', 'month', 'week', 'day', 'hour', 'minute', 'second'), \
            "'kind' must be one of 'year', 'month', 'week', 'day', 'hour', 'minute', or 'second'."
        assert order in ('ASC', 'DESC'), \
            "'order' must be either 'ASC' or 'DESC'."
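        # Illustrative note (added): e.g.
        #     Entry.objects.datetimes('created', 'month', order='DESC')
        # yields one truncated datetime per distinct month in the 'created'
        # column (`Entry` and 'created' are hypothetical).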
        if settings.USE_TZ:
            if tzinfo is None:
                tzinfo = timezone.get_current_timezone()
        else:
            tzinfo = None
        return self.annotate(
            datetimefield=Trunc(field_name, kind, output_field=DateTimeField(), tzinfo=tzinfo),
            plain_field=F(field_name)
        ).values_list(
            'datetimefield', flat=True
        ).distinct().filter(plain_field__isnull=False).order_by(('-' if order == 'DESC' else '') + 'datetimefield')

    def none(self):
        """Return an empty QuerySet."""
        clone = self._chain()
        clone.query.set_empty()
        return clone

    ##################################################################
    # PUBLIC METHODS THAT ALTER ATTRIBUTES AND RETURN A NEW QUERYSET #
    ##################################################################

    def all(self):
        """
        Return a new QuerySet that is a copy of the current one. This allows a
        QuerySet to proxy for a model manager in some cases.
        """
        return self._chain()

    def filter(self, *args, **kwargs):
        """
        Return a new QuerySet instance with the args ANDed to the existing
        set.
        """
        self._not_support_combined_queries('filter')
        return self._filter_or_exclude(False, *args, **kwargs)

    def exclude(self, *args, **kwargs):
        """
        Return a new QuerySet instance with NOT (args) ANDed to the existing
        set.
        """
        self._not_support_combined_queries('exclude')
        return self._filter_or_exclude(True, *args, **kwargs)

    def _filter_or_exclude(self, negate, *args, **kwargs):
        if args or kwargs:
            assert not self.query.is_sliced, \
                "Cannot filter a query once a slice has been taken."

        clone = self._chain()
        if self._defer_next_filter:
            self._defer_next_filter = False
            clone._deferred_filter = negate, args, kwargs
        else:
            clone._filter_or_exclude_inplace(negate, *args, **kwargs)
        return clone

    def _filter_or_exclude_inplace(self, negate, *args, **kwargs):
        if negate:
            self._query.add_q(~Q(*args, **kwargs))
        else:
            self._query.add_q(Q(*args, **kwargs))

    def complex_filter(self, filter_obj):
        """
        Return a new QuerySet instance with filter_obj added to the filters.

        filter_obj can be a Q object or a dictionary of keyword lookup
        arguments.

        This exists to support framework features such as 'limit_choices_to',
        and usually it will be more natural to use other methods.
        """
        if isinstance(filter_obj, Q):
            clone = self._chain()
            clone.query.add_q(filter_obj)
            return clone
        else:
            return self._filter_or_exclude(False, **filter_obj)

    def _combinator_query(self, combinator, *other_qs, all=False):
        # Clone the query to inherit the select list and everything
        clone = self._chain()
        # Clear limits and ordering so they can be reapplied
        clone.query.clear_ordering(True)
        clone.query.clear_limits()
        clone.query.combined_queries = (self.query,) + tuple(qs.query for qs in other_qs)
        clone.query.combinator = combinator
        clone.query.combinator_all = all
        return clone

    def union(self, *other_qs, all=False):
        # If the query is an EmptyQuerySet, combine all nonempty querysets.
        if isinstance(self, EmptyQuerySet):
            qs = [q for q in other_qs if not isinstance(q, EmptyQuerySet)]
            return qs[0]._combinator_query('union', *qs[1:], all=all) if qs else self
        return self._combinator_query('union', *other_qs, all=all)

    def intersection(self, *other_qs):
        # If any query is an EmptyQuerySet, return it.
        if isinstance(self, EmptyQuerySet):
            return self
        for other in other_qs:
            if isinstance(other, EmptyQuerySet):
                return other
        return self._combinator_query('intersection', *other_qs)

    def difference(self, *other_qs):
        # If the query is an EmptyQuerySet, return it.
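        # Illustrative note (added): like union() and intersection() above,
        # this builds a combined query, e.g.
        #     qs1.difference(qs2)  # SELECT ... EXCEPT SELECT ...
        # on backends that support EXCEPT.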
        if isinstance(self, EmptyQuerySet):
            return self
        return self._combinator_query('difference', *other_qs)

    def select_for_update(self, nowait=False, skip_locked=False, of=()):
        """
        Return a new QuerySet instance that will select objects with a
        FOR UPDATE lock.
        """
        if nowait and skip_locked:
            raise ValueError('The nowait option cannot be used with skip_locked.')
        obj = self._chain()
        obj._for_write = True
        obj.query.select_for_update = True
        obj.query.select_for_update_nowait = nowait
        obj.query.select_for_update_skip_locked = skip_locked
        obj.query.select_for_update_of = of
        return obj

    def select_related(self, *fields):
        """
        Return a new QuerySet instance that will select related objects.

        If fields are specified, they must be ForeignKey fields and only those
        related objects are included in the selection.

        If select_related(None) is called, clear the list.
        """
        self._not_support_combined_queries('select_related')
        if self._fields is not None:
            raise TypeError("Cannot call select_related() after .values() or .values_list()")

        obj = self._chain()
        if fields == (None,):
            obj.query.select_related = False
        elif fields:
            obj.query.add_select_related(fields)
        else:
            obj.query.select_related = True
        return obj

    def prefetch_related(self, *lookups):
        """
        Return a new QuerySet instance that will prefetch the specified
        Many-To-One and Many-To-Many related objects when the QuerySet is
        evaluated.

        When prefetch_related() is called more than once, append to the list of
        prefetch lookups. If prefetch_related(None) is called, clear the list.
        """
        self._not_support_combined_queries('prefetch_related')
        clone = self._chain()
        if lookups == (None,):
            clone._prefetch_related_lookups = ()
        else:
            for lookup in lookups:
                if isinstance(lookup, Prefetch):
                    lookup = lookup.prefetch_to
                lookup = lookup.split(LOOKUP_SEP, 1)[0]
                if lookup in self.query._filtered_relations:
                    raise ValueError('prefetch_related() is not supported with FilteredRelation.')
            clone._prefetch_related_lookups = clone._prefetch_related_lookups + lookups
        return clone

    def annotate(self, *args, **kwargs):
        """
        Return a query set in which the returned objects have been annotated
        with extra data or aggregations.
        """
        self._not_support_combined_queries('annotate')
        self._validate_values_are_expressions(args + tuple(kwargs.values()), method_name='annotate')
        annotations = {}
        for arg in args:
            # The default_alias property may raise a TypeError.
            try:
                if arg.default_alias in kwargs:
                    raise ValueError("The named annotation '%s' conflicts with the "
                                     "default name for another annotation."
                                     % arg.default_alias)
            except TypeError:
                raise TypeError("Complex annotations require an alias")
            annotations[arg.default_alias] = arg
        annotations.update(kwargs)

        clone = self._chain()
        names = self._fields
        if names is None:
            names = set(chain.from_iterable(
                (field.name, field.attname) if hasattr(field, 'attname') else (field.name,)
                for field in self.model._meta.get_fields()
            ))

        for alias, annotation in annotations.items():
            if alias in names:
                raise ValueError("The annotation '%s' conflicts with a field on "
                                 "the model."
                                 % alias)
            if isinstance(annotation, FilteredRelation):
                clone.query.add_filtered_relation(annotation, alias)
            else:
                clone.query.add_annotation(annotation, alias, is_summary=False)

        for alias, annotation in clone.query.annotations.items():
            if alias in annotations and annotation.contains_aggregate:
                if clone._fields is None:
                    clone.query.group_by = True
                else:
                    clone.query.set_group_by()
                break

        return clone

    def order_by(self, *field_names):
        """Return a new QuerySet instance with the ordering changed."""
        assert not self.query.is_sliced, \
            "Cannot reorder a query once a slice has been taken."
        obj = self._chain()
        obj.query.clear_ordering(force_empty=False)
        obj.query.add_ordering(*field_names)
        return obj

    def distinct(self, *field_names):
        """
        Return a new QuerySet instance that will select only distinct results.
        """
        assert not self.query.is_sliced, \
            "Cannot create distinct fields once a slice has been taken."
        obj = self._chain()
        obj.query.add_distinct_fields(*field_names)
        return obj

    def extra(self, select=None, where=None, params=None, tables=None,
              order_by=None, select_params=None):
        """Add extra SQL fragments to the query."""
        self._not_support_combined_queries('extra')
        assert not self.query.is_sliced, \
            "Cannot change a query once a slice has been taken"
        clone = self._chain()
        clone.query.add_extra(select, select_params, where, params, tables, order_by)
        return clone

    def reverse(self):
        """Reverse the ordering of the QuerySet."""
        if self.query.is_sliced:
            raise TypeError('Cannot reverse a query once a slice has been taken.')
        clone = self._chain()
        clone.query.standard_ordering = not clone.query.standard_ordering
        return clone

    def defer(self, *fields):
        """
        Defer the loading of data for certain fields until they are accessed.
        Add the set of deferred fields to any existing set of deferred fields.
        The only exception to this is if None is passed in as the only
        parameter, in which case all deferrals are removed.
        """
        self._not_support_combined_queries('defer')
        if self._fields is not None:
            raise TypeError("Cannot call defer() after .values() or .values_list()")
        clone = self._chain()
        if fields == (None,):
            clone.query.clear_deferred_loading()
        else:
            clone.query.add_deferred_loading(fields)
        return clone

    def only(self, *fields):
        """
        Essentially, the opposite of defer(). Only the fields passed into this
        method and that are not already specified as deferred are loaded
        immediately when the queryset is evaluated.
        """
        self._not_support_combined_queries('only')
        if self._fields is not None:
            raise TypeError("Cannot call only() after .values() or .values_list()")
        if fields == (None,):
            # Can only pass None to defer(), not only(), as the rest option.
            # That won't stop people trying to do this, so let's be explicit.
            raise TypeError("Cannot pass None as an argument to only().")
        for field in fields:
            field = field.split(LOOKUP_SEP, 1)[0]
            if field in self.query._filtered_relations:
                raise ValueError('only() is not supported with FilteredRelation.')
        clone = self._chain()
        clone.query.add_immediate_loading(fields)
        return clone

    def using(self, alias):
        """Select which database this QuerySet should execute against."""
        clone = self._chain()
        clone._db = alias
        return clone

    ###################################
    # PUBLIC INTROSPECTION ATTRIBUTES #
    ###################################

    @property
    def ordered(self):
        """
        Return True if the QuerySet is ordered -- i.e. has an order_by()
        clause or a default ordering on the model (or is empty).
""" if isinstance(self, EmptyQuerySet): return True if self.query.extra_order_by or self.query.order_by: return True elif self.query.default_ordering and self.query.get_meta().ordering: return True else: return False @property def db(self): """Return the database used if this query is executed now.""" if self._for_write: return self._db or router.db_for_write(self.model, **self._hints) return self._db or router.db_for_read(self.model, **self._hints) ################### # PRIVATE METHODS # ################### def _insert(self, objs, fields, returning_fields=None, raw=False, using=None, ignore_conflicts=False): """ Insert a new record for the given model. This provides an interface to the InsertQuery class and is how Model.save() is implemented. """ self._for_write = True if using is None: using = self.db query = sql.InsertQuery(self.model, ignore_conflicts=ignore_conflicts) query.insert_values(fields, objs, raw=raw) return query.get_compiler(using=using).execute_sql(returning_fields) _insert.alters_data = True _insert.queryset_only = False def _batched_insert(self, objs, fields, batch_size, ignore_conflicts=False): """ Helper method for bulk_create() to insert objs one batch at a time. """ if ignore_conflicts and not connections[self.db].features.supports_ignore_conflicts: raise NotSupportedError('This database backend does not support ignoring conflicts.') ops = connections[self.db].ops max_batch_size = max(ops.bulk_batch_size(fields, objs), 1) batch_size = min(batch_size, max_batch_size) if batch_size else max_batch_size inserted_rows = [] bulk_return = connections[self.db].features.can_return_rows_from_bulk_insert for item in [objs[i:i + batch_size] for i in range(0, len(objs), batch_size)]: if bulk_return and not ignore_conflicts: inserted_columns = self._insert( item, fields=fields, using=self.db, returning_fields=self.model._meta.db_returning_fields, ignore_conflicts=ignore_conflicts, ) if isinstance(inserted_columns, list): inserted_rows.extend(inserted_columns) else: inserted_rows.append(inserted_columns) else: self._insert(item, fields=fields, using=self.db, ignore_conflicts=ignore_conflicts) return inserted_rows def _chain(self, **kwargs): """ Return a copy of the current QuerySet that's ready for another operation. """ obj = self._clone() if obj._sticky_filter: obj.query.filter_is_sticky = True obj._sticky_filter = False obj.__dict__.update(kwargs) return obj def _clone(self): """ Return a copy of the current QuerySet. A lightweight alternative to deepcopy(). """ c = self.__class__(model=self.model, query=self.query.chain(), using=self._db, hints=self._hints) c._sticky_filter = self._sticky_filter c._for_write = self._for_write c._prefetch_related_lookups = self._prefetch_related_lookups[:] c._known_related_objects = self._known_related_objects c._iterable_class = self._iterable_class c._fields = self._fields return c def _fetch_all(self): if self._result_cache is None: self._result_cache = list(self._iterable_class(self)) if self._prefetch_related_lookups and not self._prefetch_done: self._prefetch_related_objects() def _next_is_sticky(self): """ Indicate that the next filter call and the one following that should be treated as a single filter. This is only important when it comes to determining when to reuse tables for many-to-many filters. Required so that we can filter naturally on the results of related managers. This doesn't return a clone of the current QuerySet (it returns "self"). 
        The method is only used internally and should be immediately followed
        by a filter() that does create a clone.
        """
        self._sticky_filter = True
        return self

    def _merge_sanity_check(self, other):
        """Check that two QuerySet classes may be merged."""
        if self._fields is not None and (
                set(self.query.values_select) != set(other.query.values_select) or
                set(self.query.extra_select) != set(other.query.extra_select) or
                set(self.query.annotation_select) != set(other.query.annotation_select)):
            raise TypeError(
                "Merging '%s' classes must involve the same values in each case."
                % self.__class__.__name__
            )

    def _merge_known_related_objects(self, other):
        """
        Keep track of all known related objects from either QuerySet instance.
        """
        for field, objects in other._known_related_objects.items():
            self._known_related_objects.setdefault(field, {}).update(objects)

    def resolve_expression(self, *args, **kwargs):
        if self._fields and len(self._fields) > 1:
            # values() queryset can only be used as nested queries
            # if they are set up to select only a single field.
            raise TypeError('Cannot use multi-field values as a filter value.')
        query = self.query.resolve_expression(*args, **kwargs)
        query._db = self._db
        return query
    resolve_expression.queryset_only = True

    def _add_hints(self, **hints):
        """
        Update hinting information for use by routers. Add new key/values or
        overwrite existing key/values.
        """
        self._hints.update(hints)

    def _has_filters(self):
        """
        Check if this QuerySet has any filtering going on. This isn't
        equivalent to checking if all objects are present in results, for
        example, qs[1:]._has_filters() -> False.
        """
        return self.query.has_filters()

    @staticmethod
    def _validate_values_are_expressions(values, method_name):
        invalid_args = sorted(str(arg) for arg in values if not hasattr(arg, 'resolve_expression'))
        if invalid_args:
            raise TypeError(
                'QuerySet.%s() received non-expression(s): %s.' % (
                    method_name,
                    ', '.join(invalid_args),
                )
            )

    def _not_support_combined_queries(self, operation_name):
        if self.query.combinator:
            raise NotSupportedError(
                'Calling QuerySet.%s() after %s() is not supported.'
                % (operation_name, self.query.combinator)
            )


class InstanceCheckMeta(type):
    def __instancecheck__(self, instance):
        return isinstance(instance, QuerySet) and instance.query.is_empty()


class EmptyQuerySet(metaclass=InstanceCheckMeta):
    """
    Marker class for checking if a queryset is empty by .none():
        isinstance(qs.none(), EmptyQuerySet) -> True
    """

    def __init__(self, *args, **kwargs):
        raise TypeError("EmptyQuerySet can't be instantiated")


class RawQuerySet:
    """
    Provide an iterator which converts the results of raw SQL queries into
    annotated model instances.
""" def __init__(self, raw_query, model=None, query=None, params=None, translations=None, using=None, hints=None): self.raw_query = raw_query self.model = model self._db = using self._hints = hints or {} self.query = query or sql.RawQuery(sql=raw_query, using=self.db, params=params) self.params = params or () self.translations = translations or {} self._result_cache = None self._prefetch_related_lookups = () self._prefetch_done = False def resolve_model_init_order(self): """Resolve the init field names and value positions.""" converter = connections[self.db].introspection.identifier_converter model_init_fields = [f for f in self.model._meta.fields if converter(f.column) in self.columns] annotation_fields = [(column, pos) for pos, column in enumerate(self.columns) if column not in self.model_fields] model_init_order = [self.columns.index(converter(f.column)) for f in model_init_fields] model_init_names = [f.attname for f in model_init_fields] return model_init_names, model_init_order, annotation_fields def prefetch_related(self, *lookups): """Same as QuerySet.prefetch_related()""" clone = self._clone() if lookups == (None,): clone._prefetch_related_lookups = () else: clone._prefetch_related_lookups = clone._prefetch_related_lookups + lookups return clone def _prefetch_related_objects(self): prefetch_related_objects(self._result_cache, *self._prefetch_related_lookups) self._prefetch_done = True def _clone(self): """Same as QuerySet._clone()""" c = self.__class__( self.raw_query, model=self.model, query=self.query, params=self.params, translations=self.translations, using=self._db, hints=self._hints ) c._prefetch_related_lookups = self._prefetch_related_lookups[:] return c def _fetch_all(self): if self._result_cache is None: self._result_cache = list(self.iterator()) if self._prefetch_related_lookups and not self._prefetch_done: self._prefetch_related_objects() def __len__(self): self._fetch_all() return len(self._result_cache) def __bool__(self): self._fetch_all() return bool(self._result_cache) def __iter__(self): self._fetch_all() return iter(self._result_cache) def iterator(self): # Cache some things for performance reasons outside the loop. db = self.db compiler = connections[db].ops.compiler('SQLCompiler')( self.query, connections[db], db ) query = iter(self.query) try: model_init_names, model_init_pos, annotation_fields = self.resolve_model_init_order() if self.model._meta.pk.attname not in model_init_names: raise exceptions.FieldDoesNotExist( 'Raw query must include the primary key' ) model_cls = self.model fields = [self.model_fields.get(c) for c in self.columns] converters = compiler.get_converters([ f.get_col(f.model._meta.db_table) if f else None for f in fields ]) if converters: query = compiler.apply_converters(query, converters) for values in query: # Associate fields to values model_init_values = [values[pos] for pos in model_init_pos] instance = model_cls.from_db(db, model_init_names, model_init_values) if annotation_fields: for column, pos in annotation_fields: setattr(instance, column, values[pos]) yield instance finally: # Done iterating the Query. If it has its own cursor, close it. 
            if hasattr(self.query, 'cursor') and self.query.cursor:
                self.query.cursor.close()

    def __repr__(self):
        return "<%s: %s>" % (self.__class__.__name__, self.query)

    def __getitem__(self, k):
        return list(self)[k]

    @property
    def db(self):
        """Return the database used if this query is executed now."""
        return self._db or router.db_for_read(self.model, **self._hints)

    def using(self, alias):
        """Select the database this RawQuerySet should execute against."""
        return RawQuerySet(
            self.raw_query, model=self.model,
            query=self.query.chain(using=alias),
            params=self.params, translations=self.translations,
            using=alias,
        )

    @cached_property
    def columns(self):
        """
        A list of model field names in the order they'll appear in the
        query results.
        """
        columns = self.query.get_columns()
        # Adjust any column names which don't match field names
        for (query_name, model_name) in self.translations.items():
            # Ignore translations for nonexistent column names
            try:
                index = columns.index(query_name)
            except ValueError:
                pass
            else:
                columns[index] = model_name
        return columns

    @cached_property
    def model_fields(self):
        """A dict mapping column names to model field names."""
        converter = connections[self.db].introspection.identifier_converter
        model_fields = {}
        for field in self.model._meta.fields:
            name, column = field.get_attname_column()
            model_fields[converter(column)] = field
        return model_fields


class Prefetch:
    def __init__(self, lookup, queryset=None, to_attr=None):
        # `prefetch_through` is the path we traverse to perform the prefetch.
        self.prefetch_through = lookup
        # `prefetch_to` is the path to the attribute that stores the result.
        self.prefetch_to = lookup
        if queryset is not None and (
            isinstance(queryset, RawQuerySet) or (
                hasattr(queryset, '_iterable_class') and
                not issubclass(queryset._iterable_class, ModelIterable)
            )
        ):
            raise ValueError(
                'Prefetch querysets cannot use raw(), values(), and '
                'values_list().'
            )
        if to_attr:
            self.prefetch_to = LOOKUP_SEP.join(lookup.split(LOOKUP_SEP)[:-1] + [to_attr])

        self.queryset = queryset
        self.to_attr = to_attr

    def __getstate__(self):
        obj_dict = self.__dict__.copy()
        if self.queryset is not None:
            # Prevent the QuerySet from being evaluated
            obj_dict['queryset'] = self.queryset._chain(
                _result_cache=[],
                _prefetch_done=True,
            )
        return obj_dict

    def add_prefix(self, prefix):
        self.prefetch_through = prefix + LOOKUP_SEP + self.prefetch_through
        self.prefetch_to = prefix + LOOKUP_SEP + self.prefetch_to

    def get_current_prefetch_to(self, level):
        return LOOKUP_SEP.join(self.prefetch_to.split(LOOKUP_SEP)[:level + 1])

    def get_current_to_attr(self, level):
        parts = self.prefetch_to.split(LOOKUP_SEP)
        to_attr = parts[level]
        as_attr = self.to_attr and level == len(parts) - 1
        return to_attr, as_attr

    def get_current_queryset(self, level):
        if self.get_current_prefetch_to(level) == self.prefetch_to:
            return self.queryset
        return None

    def __eq__(self, other):
        if not isinstance(other, Prefetch):
            return NotImplemented
        return self.prefetch_to == other.prefetch_to

    def __hash__(self):
        return hash((self.__class__, self.prefetch_to))


def normalize_prefetch_lookups(lookups, prefix=None):
    """Normalize lookups into Prefetch objects."""
    ret = []
    for lookup in lookups:
        if not isinstance(lookup, Prefetch):
            lookup = Prefetch(lookup)
        if prefix:
            lookup.add_prefix(prefix)
        ret.append(lookup)
    return ret


def prefetch_related_objects(model_instances, *related_lookups):
    """
    Populate prefetched object caches for a list of model instances based on
    the lookups/Prefetch instances given.
""" if not model_instances: return # nothing to do # We need to be able to dynamically add to the list of prefetch_related # lookups that we look up (see below). So we need some book keeping to # ensure we don't do duplicate work. done_queries = {} # dictionary of things like 'foo__bar': [results] auto_lookups = set() # we add to this as we go through. followed_descriptors = set() # recursion protection all_lookups = normalize_prefetch_lookups(reversed(related_lookups)) while all_lookups: lookup = all_lookups.pop() if lookup.prefetch_to in done_queries: if lookup.queryset is not None: raise ValueError("'%s' lookup was already seen with a different queryset. " "You may need to adjust the ordering of your lookups." % lookup.prefetch_to) continue # Top level, the list of objects to decorate is the result cache # from the primary QuerySet. It won't be for deeper levels. obj_list = model_instances through_attrs = lookup.prefetch_through.split(LOOKUP_SEP) for level, through_attr in enumerate(through_attrs): # Prepare main instances if not obj_list: break prefetch_to = lookup.get_current_prefetch_to(level) if prefetch_to in done_queries: # Skip any prefetching, and any object preparation obj_list = done_queries[prefetch_to] continue # Prepare objects: good_objects = True for obj in obj_list: # Since prefetching can re-use instances, it is possible to have # the same instance multiple times in obj_list, so obj might # already be prepared. if not hasattr(obj, '_prefetched_objects_cache'): try: obj._prefetched_objects_cache = {} except (AttributeError, TypeError): # Must be an immutable object from # values_list(flat=True), for example (TypeError) or # a QuerySet subclass that isn't returning Model # instances (AttributeError), either in Django or a 3rd # party. prefetch_related() doesn't make sense, so quit. good_objects = False break if not good_objects: break # Descend down tree # We assume that objects retrieved are homogeneous (which is the premise # of prefetch_related), so what applies to first object applies to all. first_obj = obj_list[0] to_attr = lookup.get_current_to_attr(level)[0] prefetcher, descriptor, attr_found, is_fetched = get_prefetcher(first_obj, through_attr, to_attr) if not attr_found: raise AttributeError("Cannot find '%s' on %s object, '%s' is an invalid " "parameter to prefetch_related()" % (through_attr, first_obj.__class__.__name__, lookup.prefetch_through)) if level == len(through_attrs) - 1 and prefetcher is None: # Last one, this *must* resolve to something that supports # prefetching, otherwise there is no point adding it and the # developer asking for it has made a mistake. raise ValueError("'%s' does not resolve to an item that supports " "prefetching - this is an invalid parameter to " "prefetch_related()." % lookup.prefetch_through) if prefetcher is not None and not is_fetched: obj_list, additional_lookups = prefetch_one_level(obj_list, prefetcher, lookup, level) # We need to ensure we don't keep adding lookups from the # same relationships to stop infinite recursion. So, if we # are already on an automatically added lookup, don't add # the new lookups from relationships we've seen already. 
                if not (prefetch_to in done_queries and lookup in auto_lookups and
                        descriptor in followed_descriptors):
                    done_queries[prefetch_to] = obj_list
                    new_lookups = normalize_prefetch_lookups(reversed(additional_lookups), prefetch_to)
                    auto_lookups.update(new_lookups)
                    all_lookups.extend(new_lookups)
                followed_descriptors.add(descriptor)
            else:
                # Either a singly related object that has already been fetched
                # (e.g. via select_related), or hopefully some other property
                # that doesn't support prefetching but needs to be traversed.

                # We replace the current list of parent objects with the list
                # of related objects, filtering out empty or missing values so
                # that we can continue with nullable or reverse relations.
                new_obj_list = []
                for obj in obj_list:
                    if through_attr in getattr(obj, '_prefetched_objects_cache', ()):
                        # If related objects have been prefetched, use the
                        # cache rather than the object's through_attr.
                        new_obj = list(obj._prefetched_objects_cache.get(through_attr))
                    else:
                        try:
                            new_obj = getattr(obj, through_attr)
                        except exceptions.ObjectDoesNotExist:
                            continue
                    if new_obj is None:
                        continue
                    # We special-case `list` rather than something more generic
                    # like `Iterable` because we don't want to accidentally match
                    # user models that define __iter__.
                    if isinstance(new_obj, list):
                        new_obj_list.extend(new_obj)
                    else:
                        new_obj_list.append(new_obj)
                obj_list = new_obj_list


def get_prefetcher(instance, through_attr, to_attr):
    """
    For the attribute 'through_attr' on the given instance, find
    an object that has a get_prefetch_queryset().
    Return a 4-tuple containing:
    (the object with get_prefetch_queryset (or None),
     the descriptor object representing this relationship (or None),
     a boolean that is False if the attribute was not found at all,
     a boolean that is True if the attribute has already been fetched)
    """
    prefetcher = None
    is_fetched = False

    # For singly related objects, we have to avoid getting the attribute
    # from the object, as this will trigger the query. So we first try
    # on the class, in order to get the descriptor object.
    rel_obj_descriptor = getattr(instance.__class__, through_attr, None)
    if rel_obj_descriptor is None:
        attr_found = hasattr(instance, through_attr)
    else:
        attr_found = True
        if rel_obj_descriptor:
            # singly related object, descriptor object has the
            # get_prefetch_queryset() method.
            if hasattr(rel_obj_descriptor, 'get_prefetch_queryset'):
                prefetcher = rel_obj_descriptor
                if rel_obj_descriptor.is_cached(instance):
                    is_fetched = True
            else:
                # descriptor doesn't support prefetching, so we go ahead and get
                # the attribute on the instance rather than the class to
                # support many related managers
                rel_obj = getattr(instance, through_attr)
                if hasattr(rel_obj, 'get_prefetch_queryset'):
                    prefetcher = rel_obj
                if through_attr != to_attr:
                    # Special case cached_property instances because hasattr
                    # triggers attribute computation and assignment.
                    if isinstance(getattr(instance.__class__, to_attr, None), cached_property):
                        is_fetched = to_attr in instance.__dict__
                    else:
                        is_fetched = hasattr(instance, to_attr)
                else:
                    is_fetched = through_attr in instance._prefetched_objects_cache
    return prefetcher, rel_obj_descriptor, attr_found, is_fetched


def prefetch_one_level(instances, prefetcher, lookup, level):
    """
    Helper function for prefetch_related_objects().

    Run prefetches on all instances using the prefetcher object,
    assigning results to relevant caches in instance.

    Return the prefetched objects along with any additional prefetches that
    must be done due to prefetch_related lookups found from default managers.
""" # prefetcher must have a method get_prefetch_queryset() which takes a list # of instances, and returns a tuple: # (queryset of instances of self.model that are related to passed in instances, # callable that gets value to be matched for returned instances, # callable that gets value to be matched for passed in instances, # boolean that is True for singly related objects, # cache or field name to assign to, # boolean that is True when the previous argument is a cache name vs a field name). # The 'values to be matched' must be hashable as they will be used # in a dictionary. rel_qs, rel_obj_attr, instance_attr, single, cache_name, is_descriptor = ( prefetcher.get_prefetch_queryset(instances, lookup.get_current_queryset(level))) # We have to handle the possibility that the QuerySet we just got back # contains some prefetch_related lookups. We don't want to trigger the # prefetch_related functionality by evaluating the query. Rather, we need # to merge in the prefetch_related lookups. # Copy the lookups in case it is a Prefetch object which could be reused # later (happens in nested prefetch_related). additional_lookups = [ copy.copy(additional_lookup) for additional_lookup in getattr(rel_qs, '_prefetch_related_lookups', ()) ] if additional_lookups: # Don't need to clone because the manager should have given us a fresh # instance, so we access an internal instead of using public interface # for performance reasons. rel_qs._prefetch_related_lookups = () all_related_objects = list(rel_qs) rel_obj_cache = {} for rel_obj in all_related_objects: rel_attr_val = rel_obj_attr(rel_obj) rel_obj_cache.setdefault(rel_attr_val, []).append(rel_obj) to_attr, as_attr = lookup.get_current_to_attr(level) # Make sure `to_attr` does not conflict with a field. if as_attr and instances: # We assume that objects retrieved are homogeneous (which is the premise # of prefetch_related), so what applies to first object applies to all. model = instances[0].__class__ try: model._meta.get_field(to_attr) except exceptions.FieldDoesNotExist: pass else: msg = 'to_attr={} conflicts with a field on the {} model.' raise ValueError(msg.format(to_attr, model.__name__)) # Whether or not we're prefetching the last part of the lookup. leaf = len(lookup.prefetch_through.split(LOOKUP_SEP)) - 1 == level for obj in instances: instance_attr_val = instance_attr(obj) vals = rel_obj_cache.get(instance_attr_val, []) if single: val = vals[0] if vals else None if as_attr: # A to_attr has been given for the prefetch. setattr(obj, to_attr, val) elif is_descriptor: # cache_name points to a field name in obj. # This field is a descriptor for a related object. setattr(obj, cache_name, val) else: # No to_attr has been given for this prefetch operation and the # cache_name does not point to a descriptor. Store the value of # the field in the object's field cache. obj._state.fields_cache[cache_name] = val else: if as_attr: setattr(obj, to_attr, vals) else: manager = getattr(obj, to_attr) if leaf and lookup.queryset is not None: qs = manager._apply_rel_filters(lookup.queryset) else: qs = manager.get_queryset() qs._result_cache = vals # We don't want the individual qs doing prefetch_related now, # since we have merged this into the current work. qs._prefetch_done = True obj._prefetched_objects_cache[cache_name] = qs return all_related_objects, additional_lookups class RelatedPopulator: """ RelatedPopulator is used for select_related() object instantiation. 
The idea is that each select_related() model will be populated by a different RelatedPopulator instance. The RelatedPopulator instances get klass_info and select (computed in SQLCompiler) plus the used db as input for initialization. That data is used to compute which columns to use, how to instantiate the model, and how to populate the links between the objects. The actual creation of the objects is done in populate() method. This method gets row and from_obj as input and populates the select_related() model instance. """ def __init__(self, klass_info, select, db): self.db = db # Pre-compute needed attributes. The attributes are: # - model_cls: the possibly deferred model class to instantiate # - either: # - cols_start, cols_end: usually the columns in the row are # in the same order model_cls.__init__ expects them, so we # can instantiate by model_cls(*row[cols_start:cols_end]) # - reorder_for_init: When select_related descends to a child # class, then we want to reuse the already selected parent # data. However, in this case the parent data isn't necessarily # in the same order that Model.__init__ expects it to be, so # we have to reorder the parent data. The reorder_for_init # attribute contains a function used to reorder the field data # in the order __init__ expects it. # - pk_idx: the index of the primary key field in the reordered # model data. Used to check if a related object exists at all. # - init_list: the field attnames fetched from the database. For # deferred models this isn't the same as all attnames of the # model's fields. # - related_populators: a list of RelatedPopulator instances if # select_related() descends to related models from this model. # - local_setter, remote_setter: Methods to set cached values on # the object being populated and on the remote object. Usually # these are Field.set_cached_value() methods. select_fields = klass_info['select_fields'] from_parent = klass_info['from_parent'] if not from_parent: self.cols_start = select_fields[0] self.cols_end = select_fields[-1] + 1 self.init_list = [ f[0].target.attname for f in select[self.cols_start:self.cols_end] ] self.reorder_for_init = None else: attname_indexes = {select[idx][0].target.attname: idx for idx in select_fields} model_init_attnames = (f.attname for f in klass_info['model']._meta.concrete_fields) self.init_list = [attname for attname in model_init_attnames if attname in attname_indexes] self.reorder_for_init = operator.itemgetter(*[attname_indexes[attname] for attname in self.init_list]) self.model_cls = klass_info['model'] self.pk_idx = self.init_list.index(self.model_cls._meta.pk.attname) self.related_populators = get_related_populators(klass_info, select, self.db) self.local_setter = klass_info['local_setter'] self.remote_setter = klass_info['remote_setter'] def populate(self, row, from_obj): if self.reorder_for_init: obj_data = self.reorder_for_init(row) else: obj_data = row[self.cols_start:self.cols_end] if obj_data[self.pk_idx] is None: obj = None else: obj = self.model_cls.from_db(self.db, self.init_list, obj_data) for rel_iter in self.related_populators: rel_iter.populate(row, obj) self.local_setter(from_obj, obj) if obj is not None: self.remote_setter(obj, from_obj) def get_related_populators(klass_info, select, db): iterators = [] related_klass_infos = klass_info.get('related_klass_infos', []) for rel_klass_info in related_klass_infos: rel_cls = RelatedPopulator(rel_klass_info, select, db) iterators.append(rel_cls) return iterators
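# ---------------------------------------------------------------------------
# Usage sketch (illustrative only, not part of this module's API). Assuming
# hypothetical Author/Book models where Book.author is a ForeignKey with
# related_name='books', the prefetch machinery above is driven like this:
#
#     from django.db.models import Prefetch
#
#     authors = Author.objects.prefetch_related(
#         Prefetch(
#             'books',
#             queryset=Book.objects.order_by('title'),
#             to_attr='sorted_books',   # stored as a plain list on each Author
#         ),
#     )
#     for author in authors:           # two queries total: authors + books
#         author.sorted_books
#
# prefetch_related_objects() normalizes each lookup and walks it level by
# level; prefetch_one_level() issues one extra query per level and matches
# the related rows back to their parent instances in Python.
# ---------------------------------------------------------------------------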
# ---- django/db/models/options.py ----
import bisect import copy import inspect from collections import defaultdict from django.apps import apps from django.conf import settings from django.core.exceptions import FieldDoesNotExist, ImproperlyConfigured from django.db import connections from django.db.models import Manager from django.db.models.fields import AutoField from django.db.models.fields.proxy import OrderWrt from django.db.models.query_utils import PathInfo from django.utils.datastructures import ImmutableList, OrderedSet from django.utils.functional import cached_property from django.utils.text import camel_case_to_spaces, format_lazy from django.utils.translation import override PROXY_PARENTS = object() EMPTY_RELATION_TREE = () IMMUTABLE_WARNING = ( "The return type of '%s' should never be mutated. If you want to manipulate this list " "for your own use, make a copy first." ) DEFAULT_NAMES = ( 'verbose_name', 'verbose_name_plural', 'db_table', 'ordering', 'unique_together', 'permissions', 'get_latest_by', 'order_with_respect_to', 'app_label', 'db_tablespace', 'abstract', 'managed', 'proxy', 'swappable', 'auto_created', 'index_together', 'apps', 'default_permissions', 'select_on_save', 'default_related_name', 'required_db_features', 'required_db_vendor', 'base_manager_name', 'default_manager_name', 'indexes', 'constraints', ) def normalize_together(option_together): """ option_together can be either a tuple of tuples, or a single tuple of two strings. Normalize it to a tuple of tuples, so that calling code can uniformly expect that. """ try: if not option_together: return () if not isinstance(option_together, (tuple, list)): raise TypeError first_element = option_together[0] if not isinstance(first_element, (tuple, list)): option_together = (option_together,) # Normalize everything to tuples return tuple(tuple(ot) for ot in option_together) except TypeError: # If the value of option_together isn't valid, return it # verbatim; this will be picked up by the check framework later. return option_together def make_immutable_fields_list(name, data): return ImmutableList(data, warning=IMMUTABLE_WARNING % name) class Options: FORWARD_PROPERTIES = { 'fields', 'many_to_many', 'concrete_fields', 'local_concrete_fields', '_forward_fields_map', 'managers', 'managers_map', 'base_manager', 'default_manager', } REVERSE_PROPERTIES = {'related_objects', 'fields_map', '_relation_tree'} default_apps = apps def __init__(self, meta, app_label=None): self._get_fields_cache = {} self.local_fields = [] self.local_many_to_many = [] self.private_fields = [] self.local_managers = [] self.base_manager_name = None self.default_manager_name = None self.model_name = None self.verbose_name = None self.verbose_name_plural = None self.db_table = '' self.ordering = [] self._ordering_clash = False self.indexes = [] self.constraints = [] self.unique_together = [] self.index_together = [] self.select_on_save = False self.default_permissions = ('add', 'change', 'delete', 'view') self.permissions = [] self.object_name = None self.app_label = app_label self.get_latest_by = None self.order_with_respect_to = None self.db_tablespace = settings.DEFAULT_TABLESPACE self.required_db_features = [] self.required_db_vendor = None self.meta = meta self.pk = None self.auto_field = None self.abstract = False self.managed = True self.proxy = False # For any class that is a proxy (including automatically created # classes for deferred object loading), proxy_for_model tells us # which class this model is proxying. 
        # Note that proxy_for_model can create a chain of proxy models.
        # For non-proxy models, the variable is always None.
        self.proxy_for_model = None
        # For any non-abstract class, the concrete class is the model
        # at the end of the proxy_for_model chain. In particular, for
        # concrete models, the concrete_model is always the class itself.
        self.concrete_model = None
        self.swappable = None
        self.parents = {}
        self.auto_created = False

        # List of all lookups defined in ForeignKey 'limit_choices_to' options
        # from *other* models. Needed for some admin checks. Internal use only.
        self.related_fkey_lookups = []

        # A custom app registry to use, if you're making a separate model set.
        self.apps = self.default_apps

        self.default_related_name = None

    @property
    def label(self):
        return '%s.%s' % (self.app_label, self.object_name)

    @property
    def label_lower(self):
        return '%s.%s' % (self.app_label, self.model_name)

    @property
    def app_config(self):
        # Don't go through get_app_config to avoid triggering imports.
        return self.apps.app_configs.get(self.app_label)

    @property
    def installed(self):
        return self.app_config is not None

    def contribute_to_class(self, cls, name):
        from django.db import connection
        from django.db.backends.utils import truncate_name

        cls._meta = self
        self.model = cls
        # First, construct the default values for these options.
        self.object_name = cls.__name__
        self.model_name = self.object_name.lower()
        self.verbose_name = camel_case_to_spaces(self.object_name)

        # Store the original user-defined values for each option,
        # for use when serializing the model definition.
        self.original_attrs = {}

        # Next, apply any overridden values from 'class Meta'.
        if self.meta:
            meta_attrs = self.meta.__dict__.copy()
            for name in self.meta.__dict__:
                # Ignore any private attributes that Django doesn't care about.
                # NOTE: We can't modify a dictionary's contents while looping
                # over it, so we loop over the *original* dictionary instead.
                if name.startswith('_'):
                    del meta_attrs[name]
            for attr_name in DEFAULT_NAMES:
                if attr_name in meta_attrs:
                    setattr(self, attr_name, meta_attrs.pop(attr_name))
                    self.original_attrs[attr_name] = getattr(self, attr_name)
                elif hasattr(self.meta, attr_name):
                    setattr(self, attr_name, getattr(self.meta, attr_name))
                    self.original_attrs[attr_name] = getattr(self, attr_name)

            self.unique_together = normalize_together(self.unique_together)
            self.index_together = normalize_together(self.index_together)
            # App label/class name interpolation for names of constraints and
            # indexes.
            if not getattr(cls._meta, 'abstract', False):
                for attr_name in {'constraints', 'indexes'}:
                    objs = getattr(self, attr_name, [])
                    setattr(self, attr_name, self._format_names_with_class(cls, objs))

            # verbose_name_plural is a special case because it defaults to
            # verbose_name + 's'.
            if self.verbose_name_plural is None:
                self.verbose_name_plural = format_lazy('{}s', self.verbose_name)

            # order_with_respect_to and ordering are mutually exclusive.
            self._ordering_clash = bool(self.ordering and self.order_with_respect_to)

            # Any leftover attributes must be invalid.
            if meta_attrs != {}:
                raise TypeError("'class Meta' got invalid attribute(s): %s" % ','.join(meta_attrs))
        else:
            self.verbose_name_plural = format_lazy('{}s', self.verbose_name)
        del self.meta

        # If the db_table wasn't provided, use the app_label + model_name.
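        # For example, a hypothetical ``Book`` model in an app labelled
        # 'library' ends up with db_table == 'library_book' below (subject to
        # truncation to the backend's maximum identifier length).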
if not self.db_table: self.db_table = "%s_%s" % (self.app_label, self.model_name) self.db_table = truncate_name(self.db_table, connection.ops.max_name_length()) def _format_names_with_class(self, cls, objs): """App label/class name interpolation for object names.""" new_objs = [] for obj in objs: obj = obj.clone() obj.name = obj.name % { 'app_label': cls._meta.app_label.lower(), 'class': cls.__name__.lower(), } new_objs.append(obj) return new_objs def _prepare(self, model): if self.order_with_respect_to: # The app registry will not be ready at this point, so we cannot # use get_field(). query = self.order_with_respect_to try: self.order_with_respect_to = next( f for f in self._get_fields(reverse=False) if f.name == query or f.attname == query ) except StopIteration: raise FieldDoesNotExist("%s has no field named '%s'" % (self.object_name, query)) self.ordering = ('_order',) if not any(isinstance(field, OrderWrt) for field in model._meta.local_fields): model.add_to_class('_order', OrderWrt()) else: self.order_with_respect_to = None if self.pk is None: if self.parents: # Promote the first parent link in lieu of adding yet another # field. field = next(iter(self.parents.values())) # Look for a local field with the same name as the # first parent link. If a local field has already been # created, use it instead of promoting the parent already_created = [fld for fld in self.local_fields if fld.name == field.name] if already_created: field = already_created[0] field.primary_key = True self.setup_pk(field) if not field.remote_field.parent_link: raise ImproperlyConfigured( 'Add parent_link=True to %s.' % field, ) else: auto = AutoField(verbose_name='ID', primary_key=True, auto_created=True) model.add_to_class('id', auto) def add_manager(self, manager): self.local_managers.append(manager) self._expire_cache() def add_field(self, field, private=False): # Insert the given field in the order in which it was created, using # the "creation_counter" attribute of the field. # Move many-to-many related fields from self.fields into # self.many_to_many. if private: self.private_fields.append(field) elif field.is_relation and field.many_to_many: bisect.insort(self.local_many_to_many, field) else: bisect.insort(self.local_fields, field) self.setup_pk(field) # If the field being added is a relation to another known field, # expire the cache on this field and the forward cache on the field # being referenced, because there will be new relationships in the # cache. Otherwise, expire the cache of references *to* this field. # The mechanism for getting at the related model is slightly odd - # ideally, we'd just ask for field.related_model. However, related_model # is a cached property, and all the models haven't been loaded yet, so # we need to make sure we don't cache a string reference. if field.is_relation and hasattr(field.remote_field, 'model') and field.remote_field.model: try: field.remote_field.model._meta._expire_cache(forward=False) except AttributeError: pass self._expire_cache() else: self._expire_cache(reverse=False) def setup_pk(self, field): if not self.pk and field.primary_key: self.pk = field field.serialize = False def setup_proxy(self, target): """ Do the internal setup so that the current model is a proxy for "target". 
""" self.pk = target._meta.pk self.proxy_for_model = target self.db_table = target._meta.db_table def __repr__(self): return '<Options for %s>' % self.object_name def __str__(self): return "%s.%s" % (self.app_label, self.model_name) def can_migrate(self, connection): """ Return True if the model can/should be migrated on the `connection`. `connection` can be either a real connection or a connection alias. """ if self.proxy or self.swapped or not self.managed: return False if isinstance(connection, str): connection = connections[connection] if self.required_db_vendor: return self.required_db_vendor == connection.vendor if self.required_db_features: return all(getattr(connection.features, feat, False) for feat in self.required_db_features) return True @property def verbose_name_raw(self): """Return the untranslated verbose name.""" with override(None): return str(self.verbose_name) @property def swapped(self): """ Has this model been swapped out for another? If so, return the model name of the replacement; otherwise, return None. For historical reasons, model name lookups using get_model() are case insensitive, so we make sure we are case insensitive here. """ if self.swappable: swapped_for = getattr(settings, self.swappable, None) if swapped_for: try: swapped_label, swapped_object = swapped_for.split('.') except ValueError: # setting not in the format app_label.model_name # raising ImproperlyConfigured here causes problems with # test cleanup code - instead it is raised in get_user_model # or as part of validation. return swapped_for if '%s.%s' % (swapped_label, swapped_object.lower()) != self.label_lower: return swapped_for return None @cached_property def managers(self): managers = [] seen_managers = set() bases = (b for b in self.model.mro() if hasattr(b, '_meta')) for depth, base in enumerate(bases): for manager in base._meta.local_managers: if manager.name in seen_managers: continue manager = copy.copy(manager) manager.model = self.model seen_managers.add(manager.name) managers.append((depth, manager.creation_counter, manager)) return make_immutable_fields_list( "managers", (m[2] for m in sorted(managers)), ) @cached_property def managers_map(self): return {manager.name: manager for manager in self.managers} @cached_property def base_manager(self): base_manager_name = self.base_manager_name if not base_manager_name: # Get the first parent's base_manager_name if there's one. for parent in self.model.mro()[1:]: if hasattr(parent, '_meta'): if parent._base_manager.name != '_base_manager': base_manager_name = parent._base_manager.name break if base_manager_name: try: return self.managers_map[base_manager_name] except KeyError: raise ValueError( "%s has no manager named %r" % ( self.object_name, base_manager_name, ) ) manager = Manager() manager.name = '_base_manager' manager.model = self.model manager.auto_created = True return manager @cached_property def default_manager(self): default_manager_name = self.default_manager_name if not default_manager_name and not self.local_managers: # Get the first parent's default_manager_name if there's one. 
for parent in self.model.mro()[1:]: if hasattr(parent, '_meta'): default_manager_name = parent._meta.default_manager_name break if default_manager_name: try: return self.managers_map[default_manager_name] except KeyError: raise ValueError( "%s has no manager named %r" % ( self.object_name, default_manager_name, ) ) if self.managers: return self.managers[0] @cached_property def fields(self): """ Return a list of all forward fields on the model and its parents, excluding ManyToManyFields. Private API intended only to be used by Django itself; get_fields() combined with filtering of field properties is the public API for obtaining this field list. """ # For legacy reasons, the fields property should only contain forward # fields that are not private or with a m2m cardinality. Therefore we # pass these three filters as filters to the generator. # The third lambda is a longwinded way of checking f.related_model - we don't # use that property directly because related_model is a cached property, # and all the models may not have been loaded yet; we don't want to cache # the string reference to the related_model. def is_not_an_m2m_field(f): return not (f.is_relation and f.many_to_many) def is_not_a_generic_relation(f): return not (f.is_relation and f.one_to_many) def is_not_a_generic_foreign_key(f): return not ( f.is_relation and f.many_to_one and not (hasattr(f.remote_field, 'model') and f.remote_field.model) ) return make_immutable_fields_list( "fields", (f for f in self._get_fields(reverse=False) if is_not_an_m2m_field(f) and is_not_a_generic_relation(f) and is_not_a_generic_foreign_key(f)) ) @cached_property def concrete_fields(self): """ Return a list of all concrete fields on the model and its parents. Private API intended only to be used by Django itself; get_fields() combined with filtering of field properties is the public API for obtaining this field list. """ return make_immutable_fields_list( "concrete_fields", (f for f in self.fields if f.concrete) ) @cached_property def local_concrete_fields(self): """ Return a list of all concrete fields on the model. Private API intended only to be used by Django itself; get_fields() combined with filtering of field properties is the public API for obtaining this field list. """ return make_immutable_fields_list( "local_concrete_fields", (f for f in self.local_fields if f.concrete) ) @cached_property def many_to_many(self): """ Return a list of all many to many fields on the model and its parents. Private API intended only to be used by Django itself; get_fields() combined with filtering of field properties is the public API for obtaining this list. """ return make_immutable_fields_list( "many_to_many", (f for f in self._get_fields(reverse=False) if f.is_relation and f.many_to_many) ) @cached_property def related_objects(self): """ Return all related objects pointing to the current model. The related objects can come from a one-to-one, one-to-many, or many-to-many field relation type. Private API intended only to be used by Django itself; get_fields() combined with filtering of field properties is the public API for obtaining this field list. 
""" all_related_fields = self._get_fields(forward=False, reverse=True, include_hidden=True) return make_immutable_fields_list( "related_objects", (obj for obj in all_related_fields if not obj.hidden or obj.field.many_to_many) ) @cached_property def _forward_fields_map(self): res = {} fields = self._get_fields(reverse=False) for field in fields: res[field.name] = field # Due to the way Django's internals work, get_field() should also # be able to fetch a field by attname. In the case of a concrete # field with relation, includes the *_id name too try: res[field.attname] = field except AttributeError: pass return res @cached_property def fields_map(self): res = {} fields = self._get_fields(forward=False, include_hidden=True) for field in fields: res[field.name] = field # Due to the way Django's internals work, get_field() should also # be able to fetch a field by attname. In the case of a concrete # field with relation, includes the *_id name too try: res[field.attname] = field except AttributeError: pass return res def get_field(self, field_name): """ Return a field instance given the name of a forward or reverse field. """ try: # In order to avoid premature loading of the relation tree # (expensive) we prefer checking if the field is a forward field. return self._forward_fields_map[field_name] except KeyError: # If the app registry is not ready, reverse fields are # unavailable, therefore we throw a FieldDoesNotExist exception. if not self.apps.models_ready: raise FieldDoesNotExist( "%s has no field named '%s'. The app cache isn't ready yet, " "so if this is an auto-created related field, it won't " "be available yet." % (self.object_name, field_name) ) try: # Retrieve field instance by name from cached or just-computed # field map. return self.fields_map[field_name] except KeyError: raise FieldDoesNotExist("%s has no field named '%s'" % (self.object_name, field_name)) def get_base_chain(self, model): """ Return a list of parent classes leading to `model` (ordered from closest to most distant ancestor). This has to handle the case where `model` is a grandparent or even more distant relation. """ if not self.parents: return [] if model in self.parents: return [model] for parent in self.parents: res = parent._meta.get_base_chain(model) if res: res.insert(0, parent) return res return [] def get_parent_list(self): """ Return all the ancestors of this model as a list ordered by MRO. Useful for determining if something is an ancestor, regardless of lineage. """ result = OrderedSet(self.parents) for parent in self.parents: for ancestor in parent._meta.get_parent_list(): result.add(ancestor) return list(result) def get_ancestor_link(self, ancestor): """ Return the field on the current model which points to the given "ancestor". This is possible an indirect link (a pointer to a parent model, which points, eventually, to the ancestor). Used when constructing table joins for model inheritance. Return None if the model isn't an ancestor of this one. """ if ancestor in self.parents: return self.parents[ancestor] for parent in self.parents: # Tries to get a link field from the immediate parent parent_link = parent._meta.get_ancestor_link(ancestor) if parent_link: # In case of a proxied model, the first link # of the chain to the ancestor is that parent # links return self.parents[parent] or parent_link def get_path_to_parent(self, parent): """ Return a list of PathInfos containing the path from the current model to the parent model, or an empty list if parent is not a parent of the current model. 
""" if self.model is parent: return [] # Skip the chain of proxy to the concrete proxied model. proxied_model = self.concrete_model path = [] opts = self for int_model in self.get_base_chain(parent): if int_model is proxied_model: opts = int_model._meta else: final_field = opts.parents[int_model] targets = (final_field.remote_field.get_related_field(),) opts = int_model._meta path.append(PathInfo( from_opts=final_field.model._meta, to_opts=opts, target_fields=targets, join_field=final_field, m2m=False, direct=True, filtered_relation=None, )) return path def get_path_from_parent(self, parent): """ Return a list of PathInfos containing the path from the parent model to the current model, or an empty list if parent is not a parent of the current model. """ if self.model is parent: return [] model = self.concrete_model # Get a reversed base chain including both the current and parent # models. chain = model._meta.get_base_chain(parent) chain.reverse() chain.append(model) # Construct a list of the PathInfos between models in chain. path = [] for i, ancestor in enumerate(chain[:-1]): child = chain[i + 1] link = child._meta.get_ancestor_link(ancestor) path.extend(link.get_reverse_path_info()) return path def _populate_directed_relation_graph(self): """ This method is used by each model to find its reverse objects. As this method is very expensive and is accessed frequently (it looks up every field in a model, in every app), it is computed on first access and then is set as a property on every model. """ related_objects_graph = defaultdict(list) all_models = self.apps.get_models(include_auto_created=True) for model in all_models: opts = model._meta # Abstract model's fields are copied to child models, hence we will # see the fields from the child models. if opts.abstract: continue fields_with_relations = ( f for f in opts._get_fields(reverse=False, include_parents=False) if f.is_relation and f.related_model is not None ) for f in fields_with_relations: if not isinstance(f.remote_field.model, str): related_objects_graph[f.remote_field.model._meta.concrete_model._meta].append(f) for model in all_models: # Set the relation_tree using the internal __dict__. In this way # we avoid calling the cached property. In attribute lookup, # __dict__ takes precedence over a data descriptor (such as # @cached_property). This means that the _meta._relation_tree is # only called if related_objects is not in __dict__. related_objects = related_objects_graph[model._meta.concrete_model._meta] model._meta.__dict__['_relation_tree'] = related_objects # It seems it is possible that self is not in all_models, so guard # against that with default for get(). return self.__dict__.get('_relation_tree', EMPTY_RELATION_TREE) @cached_property def _relation_tree(self): return self._populate_directed_relation_graph() def _expire_cache(self, forward=True, reverse=True): # This method is usually called by apps.cache_clear(), when the # registry is finalized, or when a new field is added. if forward: for cache_key in self.FORWARD_PROPERTIES: if cache_key in self.__dict__: delattr(self, cache_key) if reverse and not self.abstract: for cache_key in self.REVERSE_PROPERTIES: if cache_key in self.__dict__: delattr(self, cache_key) self._get_fields_cache = {} def get_fields(self, include_parents=True, include_hidden=False): """ Return a list of fields associated to the model. By default, include forward and reverse fields, fields derived from inheritance, but not hidden fields. 
The returned fields can be changed using the parameters: - include_parents: include fields derived from inheritance - include_hidden: include fields that have a related_name that starts with a "+" """ if include_parents is False: include_parents = PROXY_PARENTS return self._get_fields(include_parents=include_parents, include_hidden=include_hidden) def _get_fields(self, forward=True, reverse=True, include_parents=True, include_hidden=False, seen_models=None): """ Internal helper function to return fields of the model. * If forward=True, then fields defined on this model are returned. * If reverse=True, then relations pointing to this model are returned. * If include_hidden=True, then fields with is_hidden=True are returned. * The include_parents argument toggles if fields from parent models should be included. It has three values: True, False, and PROXY_PARENTS. When set to PROXY_PARENTS, the call will return all fields defined for the current model or any of its parents in the parent chain to the model's concrete model. """ if include_parents not in (True, False, PROXY_PARENTS): raise TypeError("Invalid argument for include_parents: %s" % (include_parents,)) # This helper function is used to allow recursion in ``get_fields()`` # implementation and to provide a fast way for Django's internals to # access specific subsets of fields. # We must keep track of which models we have already seen. Otherwise we # could include the same field multiple times from different models. topmost_call = seen_models is None if topmost_call: seen_models = set() seen_models.add(self.model) # Creates a cache key composed of all arguments cache_key = (forward, reverse, include_parents, include_hidden, topmost_call) try: # In order to avoid list manipulation. Always return a shallow copy # of the results. return self._get_fields_cache[cache_key] except KeyError: pass fields = [] # Recursively call _get_fields() on each parent, with the same # options provided in this call. if include_parents is not False: for parent in self.parents: # In diamond inheritance it is possible that we see the same # model from two different routes. In that case, avoid adding # fields from the same parent again. if parent in seen_models: continue if (parent._meta.concrete_model != self.concrete_model and include_parents == PROXY_PARENTS): continue for obj in parent._meta._get_fields( forward=forward, reverse=reverse, include_parents=include_parents, include_hidden=include_hidden, seen_models=seen_models): if not getattr(obj, 'parent_link', False) or obj.model == self.concrete_model: fields.append(obj) if reverse and not self.proxy: # Tree is computed once and cached until the app cache is expired. # It is composed of a list of fields pointing to the current model # from other models. all_fields = self._relation_tree for field in all_fields: # If hidden fields should be included or the relation is not # intentionally hidden, add to the fields dict. if include_hidden or not field.remote_field.hidden: fields.append(field.remote_field) if forward: fields += self.local_fields fields += self.local_many_to_many # Private fields are recopied to each child model, and they get a # different model as field.model in each child. Hence we have to # add the private fields separately from the topmost call. If we # did this recursively similar to local_fields, we would get field # instances with field.model != self.model. if topmost_call: fields += self.private_fields # In order to avoid list manipulation. 
        # Always return a shallow copy of the results.
        fields = make_immutable_fields_list("get_fields()", fields)

        # Store result into cache for later access.
        self._get_fields_cache[cache_key] = fields
        return fields

    @cached_property
    def _property_names(self):
        """Return a set of the names of the properties defined on the model."""
        names = []
        for name in dir(self.model):
            attr = inspect.getattr_static(self.model, name)
            if isinstance(attr, property):
                names.append(name)
        return frozenset(names)

    @cached_property
    def db_returning_fields(self):
        """
        Private API intended only to be used by Django itself. Fields to be
        returned after a database insert.
        """
        return [
            field for field in self._get_fields(forward=True, reverse=False, include_parents=PROXY_PARENTS)
            if getattr(field, 'db_returning', False)
        ]
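# ---------------------------------------------------------------------------
# Usage sketch (illustrative only). For a hypothetical ``Book`` model in an
# app labelled 'library':
#
#     Book._meta.label                             # 'library.Book'
#     Book._meta.label_lower                       # 'library.book'
#     Book._meta.get_field('title')                # cheap forward-field lookup
#     Book._meta.get_fields(include_hidden=True)   # forward + reverse fields
#
# get_fields() results are cached per argument combination in
# _get_fields_cache; _expire_cache() drops the caches when fields or managers
# are added, or when the app registry is finalized.
# ---------------------------------------------------------------------------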
# ---- django/db/models/enums.py ----
import enum

from django.utils.functional import Promise

__all__ = ['Choices', 'IntegerChoices', 'TextChoices']


class ChoicesMeta(enum.EnumMeta):
    """A metaclass for creating enum choices."""

    def __new__(metacls, classname, bases, classdict):
        labels = []
        for key in classdict._member_names:
            value = classdict[key]
            if (
                isinstance(value, (list, tuple)) and
                len(value) > 1 and
                isinstance(value[-1], (Promise, str))
            ):
                *value, label = value
                value = tuple(value)
            else:
                label = key.replace('_', ' ').title()
            labels.append(label)
            # Use dict.__setitem__() to suppress defenses against double
            # assignment in enum's classdict.
            dict.__setitem__(classdict, key, value)
        cls = super().__new__(metacls, classname, bases, classdict)
        cls._value2label_map_ = dict(zip(cls._value2member_map_, labels))
        # Add a label property to instances of enum which uses the enum member
        # that is passed in as "self" as the value to use when looking up the
        # label in the choices.
        cls.label = property(lambda self: cls._value2label_map_.get(self.value))
        cls.do_not_call_in_templates = True
        return enum.unique(cls)

    def __contains__(cls, member):
        if not isinstance(member, enum.Enum):
            # Allow non-enums to match against member values.
            return any(x.value == member for x in cls)
        return super().__contains__(member)

    @property
    def names(cls):
        empty = ['__empty__'] if hasattr(cls, '__empty__') else []
        return empty + [member.name for member in cls]

    @property
    def choices(cls):
        empty = [(None, cls.__empty__)] if hasattr(cls, '__empty__') else []
        return empty + [(member.value, member.label) for member in cls]

    @property
    def labels(cls):
        return [label for _, label in cls.choices]

    @property
    def values(cls):
        return [value for value, _ in cls.choices]


class Choices(enum.Enum, metaclass=ChoicesMeta):
    """Class for creating enumerated choices."""

    def __str__(self):
        """
        Use value when cast to str, so that Choices set as model instance
        attributes are rendered as expected in templates and similar contexts.
        """
        return str(self.value)


class IntegerChoices(int, Choices):
    """Class for creating enumerated integer choices."""
    pass


class TextChoices(str, Choices):
    """Class for creating enumerated string choices."""

    def _generate_next_value_(name, start, count, last_values):
        return name
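# ---------------------------------------------------------------------------
# Usage sketch (illustrative only):
#
#     class Suit(IntegerChoices):
#         DIAMOND = 1, 'Diamond'
#         SPADE = 2, 'Spade'
#         HEART = 3                  # label defaults to 'Heart'
#
#     Suit.choices      # [(1, 'Diamond'), (2, 'Spade'), (3, 'Heart')]
#     Suit.labels       # ['Diamond', 'Spade', 'Heart']
#     Suit.values       # [1, 2, 3]
#     Suit.HEART.label  # 'Heart'
#     3 in Suit         # True -- __contains__ also matches raw values
#
# With TextChoices, a member assigned enum.auto() receives its own name as
# the value via _generate_next_value_().
# ---------------------------------------------------------------------------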
# ---- django/db/models/fields/__init__.py ----
import collections.abc import copy import datetime import decimal import operator import uuid import warnings from base64 import b64decode, b64encode from functools import partialmethod, total_ordering from django import forms from django.apps import apps from django.conf import settings from django.core import checks, exceptions, validators from django.db import connection, connections, router from django.db.models.constants import LOOKUP_SEP from django.db.models.query_utils import DeferredAttribute, RegisterLookupMixin from django.utils import timezone from django.utils.datastructures import DictWrapper from django.utils.dateparse import ( parse_date, parse_datetime, parse_duration, parse_time, ) from django.utils.duration import duration_microseconds, duration_string from django.utils.functional import Promise, cached_property from django.utils.ipv6 import clean_ipv6_address from django.utils.itercompat import is_iterable from django.utils.text import capfirst from django.utils.translation import gettext_lazy as _ __all__ = [ 'AutoField', 'BLANK_CHOICE_DASH', 'BigAutoField', 'BigIntegerField', 'BinaryField', 'BooleanField', 'CharField', 'CommaSeparatedIntegerField', 'DateField', 'DateTimeField', 'DecimalField', 'DurationField', 'EmailField', 'Empty', 'Field', 'FilePathField', 'FloatField', 'GenericIPAddressField', 'IPAddressField', 'IntegerField', 'NOT_PROVIDED', 'NullBooleanField', 'PositiveBigIntegerField', 'PositiveIntegerField', 'PositiveSmallIntegerField', 'SlugField', 'SmallAutoField', 'SmallIntegerField', 'TextField', 'TimeField', 'URLField', 'UUIDField', ] class Empty: pass class NOT_PROVIDED: pass # The values to use for "blank" in SelectFields. Will be appended to the start # of most "choices" lists. BLANK_CHOICE_DASH = [("", "---------")] def _load_field(app_label, model_name, field_name): return apps.get_model(app_label, model_name)._meta.get_field(field_name) # A guide to Field parameters: # # * name: The name of the field specified in the model. # * attname: The attribute to use on the model object. This is the same as # "name", except in the case of ForeignKeys, where "_id" is # appended. # * db_column: The db_column specified in the model (or None). # * column: The database column for this field. This is the same as # "attname", except if db_column is specified. # # Code that introspects values, or does other dynamic things, should use # attname. For example, this gets the primary key value of object "obj": # # getattr(obj, opts.pk.attname) def _empty(of_cls): new = Empty() new.__class__ = of_cls return new def return_None(): return None @total_ordering class Field(RegisterLookupMixin): """Base class for all field types""" # Designates whether empty strings fundamentally are allowed at the # database level. empty_strings_allowed = True empty_values = list(validators.EMPTY_VALUES) # These track each time a Field instance is created. Used to retain order. # The auto_creation_counter is used for fields that Django implicitly # creates, creation_counter is used for all user-specified fields. creation_counter = 0 auto_creation_counter = -1 default_validators = [] # Default set of validators default_error_messages = { 'invalid_choice': _('Value %(value)r is not a valid choice.'), 'null': _('This field cannot be null.'), 'blank': _('This field cannot be blank.'), 'unique': _('%(model_name)s with this %(field_label)s ' 'already exists.'), # Translators: The 'lookup_type' is one of 'date', 'year' or 'month'. 
# Eg: "Title must be unique for pub_date year" 'unique_for_date': _("%(field_label)s must be unique for " "%(date_field_label)s %(lookup_type)s."), } system_check_deprecated_details = None system_check_removed_details = None # Field flags hidden = False many_to_many = None many_to_one = None one_to_many = None one_to_one = None related_model = None descriptor_class = DeferredAttribute # Generic field type description, usually overridden by subclasses def _description(self): return _('Field of type: %(field_type)s') % { 'field_type': self.__class__.__name__ } description = property(_description) def __init__(self, verbose_name=None, name=None, primary_key=False, max_length=None, unique=False, blank=False, null=False, db_index=False, rel=None, default=NOT_PROVIDED, editable=True, serialize=True, unique_for_date=None, unique_for_month=None, unique_for_year=None, choices=None, help_text='', db_column=None, db_tablespace=None, auto_created=False, validators=(), error_messages=None): self.name = name self.verbose_name = verbose_name # May be set by set_attributes_from_name self._verbose_name = verbose_name # Store original for deconstruction self.primary_key = primary_key self.max_length, self._unique = max_length, unique self.blank, self.null = blank, null self.remote_field = rel self.is_relation = self.remote_field is not None self.default = default self.editable = editable self.serialize = serialize self.unique_for_date = unique_for_date self.unique_for_month = unique_for_month self.unique_for_year = unique_for_year if isinstance(choices, collections.abc.Iterator): choices = list(choices) self.choices = choices self.help_text = help_text self.db_index = db_index self.db_column = db_column self._db_tablespace = db_tablespace self.auto_created = auto_created # Adjust the appropriate creation counter, and save our local copy. if auto_created: self.creation_counter = Field.auto_creation_counter Field.auto_creation_counter -= 1 else: self.creation_counter = Field.creation_counter Field.creation_counter += 1 self._validators = list(validators) # Store for deconstruction later messages = {} for c in reversed(self.__class__.__mro__): messages.update(getattr(c, 'default_error_messages', {})) messages.update(error_messages or {}) self._error_messages = error_messages # Store for deconstruction later self.error_messages = messages def __str__(self): """ Return "app_label.model_label.field_name" for fields attached to models. """ if not hasattr(self, 'model'): return super().__str__() model = self.model app = model._meta.app_label return '%s.%s.%s' % (app, model._meta.object_name, self.name) def __repr__(self): """Display the module, class, and name of the field.""" path = '%s.%s' % (self.__class__.__module__, self.__class__.__qualname__) name = getattr(self, 'name', None) if name is not None: return '<%s: %s>' % (path, name) return '<%s>' % path def check(self, **kwargs): return [ *self._check_field_name(), *self._check_choices(), *self._check_db_index(), *self._check_null_allowed_for_primary_keys(), *self._check_backend_specific_checks(**kwargs), *self._check_validators(), *self._check_deprecation_details(), ] def _check_field_name(self): """ Check if field name is valid, i.e. 1) does not end with an underscore, 2) does not contain "__" and 3) is not "pk". """ if self.name.endswith('_'): return [ checks.Error( 'Field names must not end with an underscore.', obj=self, id='fields.E001', ) ] elif LOOKUP_SEP in self.name: return [ checks.Error( 'Field names must not contain "%s".' 
% (LOOKUP_SEP,), obj=self, id='fields.E002', ) ] elif self.name == 'pk': return [ checks.Error( "'pk' is a reserved word that cannot be used as a field name.", obj=self, id='fields.E003', ) ] else: return [] @classmethod def _choices_is_value(cls, value): return isinstance(value, (str, Promise)) or not is_iterable(value) def _check_choices(self): if not self.choices: return [] if not is_iterable(self.choices) or isinstance(self.choices, str): return [ checks.Error( "'choices' must be an iterable (e.g., a list or tuple).", obj=self, id='fields.E004', ) ] choice_max_length = 0 # Expect [group_name, [value, display]] for choices_group in self.choices: try: group_name, group_choices = choices_group except (TypeError, ValueError): # Containing non-pairs break try: if not all( self._choices_is_value(value) and self._choices_is_value(human_name) for value, human_name in group_choices ): break if self.max_length is not None and group_choices: choice_max_length = max([ choice_max_length, *(len(value) for value, _ in group_choices if isinstance(value, str)), ]) except (TypeError, ValueError): # No groups, choices in the form [value, display] value, human_name = group_name, group_choices if not self._choices_is_value(value) or not self._choices_is_value(human_name): break if self.max_length is not None and isinstance(value, str): choice_max_length = max(choice_max_length, len(value)) # Special case: choices=['ab'] if isinstance(choices_group, str): break else: if self.max_length is not None and choice_max_length > self.max_length: return [ checks.Error( "'max_length' is too small to fit the longest value " "in 'choices' (%d characters)." % choice_max_length, obj=self, id='fields.E009', ), ] return [] return [ checks.Error( "'choices' must be an iterable containing " "(actual value, human readable name) tuples.", obj=self, id='fields.E005', ) ] def _check_db_index(self): if self.db_index not in (None, True, False): return [ checks.Error( "'db_index' must be None, True or False.", obj=self, id='fields.E006', ) ] else: return [] def _check_null_allowed_for_primary_keys(self): if (self.primary_key and self.null and not connection.features.interprets_empty_strings_as_nulls): # We cannot reliably check this for backends like Oracle which # consider NULL and '' to be equal (and thus set up # character-based fields a little differently). return [ checks.Error( 'Primary keys must not have null=True.', hint=('Set null=False on the field, or ' 'remove primary_key=True argument.'), obj=self, id='fields.E007', ) ] else: return [] def _check_backend_specific_checks(self, **kwargs): app_label = self.model._meta.app_label for db in connections: if router.allow_migrate(db, app_label, model_name=self.model._meta.model_name): return connections[db].validation.check_field(self, **kwargs) return [] def _check_validators(self): errors = [] for i, validator in enumerate(self.validators): if not callable(validator): errors.append( checks.Error( "All 'validators' must be callable.", hint=( "validators[{i}] ({repr}) isn't a function or " "instance of a validator class.".format( i=i, repr=repr(validator), ) ), obj=self, id='fields.E008', ) ) return errors def _check_deprecation_details(self): if self.system_check_removed_details is not None: return [ checks.Error( self.system_check_removed_details.get( 'msg', '%s has been removed except for support in historical ' 'migrations.' 
% self.__class__.__name__ ), hint=self.system_check_removed_details.get('hint'), obj=self, id=self.system_check_removed_details.get('id', 'fields.EXXX'), ) ] elif self.system_check_deprecated_details is not None: return [ checks.Warning( self.system_check_deprecated_details.get( 'msg', '%s has been deprecated.' % self.__class__.__name__ ), hint=self.system_check_deprecated_details.get('hint'), obj=self, id=self.system_check_deprecated_details.get('id', 'fields.WXXX'), ) ] return [] def get_col(self, alias, output_field=None): if output_field is None: output_field = self if alias != self.model._meta.db_table or output_field != self: from django.db.models.expressions import Col return Col(alias, self, output_field) else: return self.cached_col @cached_property def cached_col(self): from django.db.models.expressions import Col return Col(self.model._meta.db_table, self) def select_format(self, compiler, sql, params): """ Custom format for select clauses. For example, GIS columns need to be selected as AsText(table.col) on MySQL as the table.col data can't be used by Django. """ return sql, params def deconstruct(self): """ Return enough information to recreate the field as a 4-tuple: * The name of the field on the model, if contribute_to_class() has been run. * The import path of the field, including the class:e.g. django.db.models.IntegerField This should be the most portable version, so less specific may be better. * A list of positional arguments. * A dict of keyword arguments. Note that the positional or keyword arguments must contain values of the following types (including inner values of collection types): * None, bool, str, int, float, complex, set, frozenset, list, tuple, dict * UUID * datetime.datetime (naive), datetime.date * top-level classes, top-level functions - will be referenced by their full import path * Storage instances - these have their own deconstruct() method This is because the values here must be serialized into a text format (possibly new Python code, possibly JSON) and these are the only types with encoding handlers defined. There's no need to return the exact way the field was instantiated this time, just ensure that the resulting field is the same - prefer keyword arguments over positional ones, and omit parameters with their default values. 
""" # Short-form way of fetching all the default parameters keywords = {} possibles = { "verbose_name": None, "primary_key": False, "max_length": None, "unique": False, "blank": False, "null": False, "db_index": False, "default": NOT_PROVIDED, "editable": True, "serialize": True, "unique_for_date": None, "unique_for_month": None, "unique_for_year": None, "choices": None, "help_text": '', "db_column": None, "db_tablespace": None, "auto_created": False, "validators": [], "error_messages": None, } attr_overrides = { "unique": "_unique", "error_messages": "_error_messages", "validators": "_validators", "verbose_name": "_verbose_name", "db_tablespace": "_db_tablespace", } equals_comparison = {"choices", "validators"} for name, default in possibles.items(): value = getattr(self, attr_overrides.get(name, name)) # Unroll anything iterable for choices into a concrete list if name == "choices" and isinstance(value, collections.abc.Iterable): value = list(value) # Do correct kind of comparison if name in equals_comparison: if value != default: keywords[name] = value else: if value is not default: keywords[name] = value # Work out path - we shorten it for known Django core fields path = "%s.%s" % (self.__class__.__module__, self.__class__.__qualname__) if path.startswith("django.db.models.fields.related"): path = path.replace("django.db.models.fields.related", "django.db.models") elif path.startswith("django.db.models.fields.files"): path = path.replace("django.db.models.fields.files", "django.db.models") elif path.startswith("django.db.models.fields.proxy"): path = path.replace("django.db.models.fields.proxy", "django.db.models") elif path.startswith("django.db.models.fields"): path = path.replace("django.db.models.fields", "django.db.models") # Return basic info - other fields should override this. return (self.name, path, [], keywords) def clone(self): """ Uses deconstruct() to clone a new copy of this Field. Will not preserve any class attachments/attribute names. """ name, path, args, kwargs = self.deconstruct() return self.__class__(*args, **kwargs) def __eq__(self, other): # Needed for @total_ordering if isinstance(other, Field): return self.creation_counter == other.creation_counter return NotImplemented def __lt__(self, other): # This is needed because bisect does not take a comparison function. if isinstance(other, Field): return self.creation_counter < other.creation_counter return NotImplemented def __hash__(self): return hash(self.creation_counter) def __deepcopy__(self, memodict): # We don't have to deepcopy very much here, since most things are not # intended to be altered after initial creation. obj = copy.copy(self) if self.remote_field: obj.remote_field = copy.copy(self.remote_field) if hasattr(self.remote_field, 'field') and self.remote_field.field is self: obj.remote_field.field = obj memodict[id(self)] = obj return obj def __copy__(self): # We need to avoid hitting __reduce__, so define this # slightly weird copy construct. obj = Empty() obj.__class__ = self.__class__ obj.__dict__ = self.__dict__.copy() return obj def __reduce__(self): """ Pickling should return the model._meta.fields instance of the field, not a new copy of that field. So, use the app registry to load the model and then the field back. """ if not hasattr(self, 'model'): # Fields are sometimes used without attaching them to models (for # example in aggregation). In this case give back a plain field # instance. 
The code below will create a new empty instance of # class self.__class__, then update its dict with self.__dict__ # values - so, this is very close to normal pickle. state = self.__dict__.copy() # The _get_default cached_property can't be pickled due to lambda # usage. state.pop('_get_default', None) return _empty, (self.__class__,), state return _load_field, (self.model._meta.app_label, self.model._meta.object_name, self.name) def get_pk_value_on_save(self, instance): """ Hook to generate new PK values on save. This method is called when saving instances with no primary key value set. If this method returns something else than None, then the returned value is used when saving the new instance. """ if self.default: return self.get_default() return None def to_python(self, value): """ Convert the input value into the expected Python data type, raising django.core.exceptions.ValidationError if the data can't be converted. Return the converted value. Subclasses should override this. """ return value @cached_property def validators(self): """ Some validators can't be created at field initialization time. This method provides a way to delay their creation until required. """ return [*self.default_validators, *self._validators] def run_validators(self, value): if value in self.empty_values: return errors = [] for v in self.validators: try: v(value) except exceptions.ValidationError as e: if hasattr(e, 'code') and e.code in self.error_messages: e.message = self.error_messages[e.code] errors.extend(e.error_list) if errors: raise exceptions.ValidationError(errors) def validate(self, value, model_instance): """ Validate value and raise ValidationError if necessary. Subclasses should override this to provide validation logic. """ if not self.editable: # Skip validation for non-editable fields. return if self.choices is not None and value not in self.empty_values: for option_key, option_value in self.choices: if isinstance(option_value, (list, tuple)): # This is an optgroup, so look inside the group for # options. for optgroup_key, optgroup_value in option_value: if value == optgroup_key: return elif value == option_key: return raise exceptions.ValidationError( self.error_messages['invalid_choice'], code='invalid_choice', params={'value': value}, ) if value is None and not self.null: raise exceptions.ValidationError(self.error_messages['null'], code='null') if not self.blank and value in self.empty_values: raise exceptions.ValidationError(self.error_messages['blank'], code='blank') def clean(self, value, model_instance): """ Convert the value's type and run validation. Validation errors from to_python() and validate() are propagated. Return the correct value if no error is raised. """ value = self.to_python(value) self.validate(value, model_instance) self.run_validators(value) return value def db_type_parameters(self, connection): return DictWrapper(self.__dict__, connection.ops.quote_name, 'qn_') def db_check(self, connection): """ Return the database column check constraint for this field, for the provided connection. Works the same way as db_type() for the case that get_internal_type() does not map to a preexisting model field. """ data = self.db_type_parameters(connection) try: return connection.data_type_check_constraints[self.get_internal_type()] % data except KeyError: return None def db_type(self, connection): """ Return the database column data type for this field, for the provided connection. 
""" # The default implementation of this method looks at the # backend-specific data_types dictionary, looking up the field by its # "internal type". # # A Field class can implement the get_internal_type() method to specify # which *preexisting* Django Field class it's most similar to -- i.e., # a custom field might be represented by a TEXT column type, which is # the same as the TextField Django field type, which means the custom # field's get_internal_type() returns 'TextField'. # # But the limitation of the get_internal_type() / data_types approach # is that it cannot handle database column types that aren't already # mapped to one of the built-in Django field types. In this case, you # can implement db_type() instead of get_internal_type() to specify # exactly which wacky database column type you want to use. data = self.db_type_parameters(connection) try: return connection.data_types[self.get_internal_type()] % data except KeyError: return None def rel_db_type(self, connection): """ Return the data type that a related field pointing to this field should use. For example, this method is called by ForeignKey and OneToOneField to determine its data type. """ return self.db_type(connection) def cast_db_type(self, connection): """Return the data type to use in the Cast() function.""" db_type = connection.ops.cast_data_types.get(self.get_internal_type()) if db_type: return db_type % self.db_type_parameters(connection) return self.db_type(connection) def db_parameters(self, connection): """ Extension of db_type(), providing a range of different return values (type, checks). This will look at db_type(), allowing custom model fields to override it. """ type_string = self.db_type(connection) check_string = self.db_check(connection) return { "type": type_string, "check": check_string, } def db_type_suffix(self, connection): return connection.data_types_suffix.get(self.get_internal_type()) def get_db_converters(self, connection): if hasattr(self, 'from_db_value'): return [self.from_db_value] return [] @property def unique(self): return self._unique or self.primary_key @property def db_tablespace(self): return self._db_tablespace or settings.DEFAULT_INDEX_TABLESPACE @property def db_returning(self): """ Private API intended only to be used by Django itself. Currently only the PostgreSQL backend supports returning multiple fields on a model. """ return False def set_attributes_from_name(self, name): self.name = self.name or name self.attname, self.column = self.get_attname_column() self.concrete = self.column is not None if self.verbose_name is None and self.name: self.verbose_name = self.name.replace('_', ' ') def contribute_to_class(self, cls, name, private_only=False): """ Register the field with the model class it belongs to. If private_only is True, create a separate instance of this field for every subclass of cls, even if cls is not an abstract model. """ self.set_attributes_from_name(name) self.model = cls cls._meta.add_field(self, private=private_only) if self.column: # Don't override classmethods with the descriptor. This means that # if you have a classmethod and a field with the same name, then # such fields can't be deferred (we don't have a check for this). 
if not getattr(cls, self.attname, None): setattr(cls, self.attname, self.descriptor_class(self)) if self.choices is not None: if not hasattr(cls, 'get_%s_display' % self.name): setattr( cls, 'get_%s_display' % self.name, partialmethod(cls._get_FIELD_display, field=self), ) def get_filter_kwargs_for_object(self, obj): """ Return a dict that when passed as kwargs to self.model.filter(), would yield all instances having the same value for this field as obj has. """ return {self.name: getattr(obj, self.attname)} def get_attname(self): return self.name def get_attname_column(self): attname = self.get_attname() column = self.db_column or attname return attname, column def get_internal_type(self): return self.__class__.__name__ def pre_save(self, model_instance, add): """Return field's value just before saving.""" return getattr(model_instance, self.attname) def get_prep_value(self, value): """Perform preliminary non-db specific value checks and conversions.""" if isinstance(value, Promise): value = value._proxy____cast() return value def get_db_prep_value(self, value, connection, prepared=False): """ Return field's value prepared for interacting with the database backend. Used by the default implementations of get_db_prep_save(). """ if not prepared: value = self.get_prep_value(value) return value def get_db_prep_save(self, value, connection): """Return field's value prepared for saving into a database.""" return self.get_db_prep_value(value, connection=connection, prepared=False) def has_default(self): """Return a boolean of whether this field has a default value.""" return self.default is not NOT_PROVIDED def get_default(self): """Return the default value for this field.""" return self._get_default() @cached_property def _get_default(self): if self.has_default(): if callable(self.default): return self.default return lambda: self.default if not self.empty_strings_allowed or self.null and not connection.features.interprets_empty_strings_as_nulls: return return_None return str # return empty string def get_choices(self, include_blank=True, blank_choice=BLANK_CHOICE_DASH, limit_choices_to=None, ordering=()): """ Return choices with a default blank choices included, for use as <select> choices for this field. """ if self.choices is not None: choices = list(self.choices) if include_blank: blank_defined = any(choice in ('', None) for choice, _ in self.flatchoices) if not blank_defined: choices = blank_choice + choices return choices rel_model = self.remote_field.model limit_choices_to = limit_choices_to or self.get_limit_choices_to() choice_func = operator.attrgetter( self.remote_field.get_related_field().attname if hasattr(self.remote_field, 'get_related_field') else 'pk' ) qs = rel_model._default_manager.complex_filter(limit_choices_to) if ordering: qs = qs.order_by(*ordering) return (blank_choice if include_blank else []) + [ (choice_func(x), str(x)) for x in qs ] def value_to_string(self, obj): """ Return a string value of this field from the passed obj. This is used by the serialization framework. 
""" return str(self.value_from_object(obj)) def _get_flatchoices(self): """Flattened version of choices tuple.""" if self.choices is None: return [] flat = [] for choice, value in self.choices: if isinstance(value, (list, tuple)): flat.extend(value) else: flat.append((choice, value)) return flat flatchoices = property(_get_flatchoices) def save_form_data(self, instance, data): setattr(instance, self.name, data) def formfield(self, form_class=None, choices_form_class=None, **kwargs): """Return a django.forms.Field instance for this field.""" defaults = { 'required': not self.blank, 'label': capfirst(self.verbose_name), 'help_text': self.help_text, } if self.has_default(): if callable(self.default): defaults['initial'] = self.default defaults['show_hidden_initial'] = True else: defaults['initial'] = self.get_default() if self.choices is not None: # Fields with choices get special treatment. include_blank = (self.blank or not (self.has_default() or 'initial' in kwargs)) defaults['choices'] = self.get_choices(include_blank=include_blank) defaults['coerce'] = self.to_python if self.null: defaults['empty_value'] = None if choices_form_class is not None: form_class = choices_form_class else: form_class = forms.TypedChoiceField # Many of the subclass-specific formfield arguments (min_value, # max_value) don't apply for choice fields, so be sure to only pass # the values that TypedChoiceField will understand. for k in list(kwargs): if k not in ('coerce', 'empty_value', 'choices', 'required', 'widget', 'label', 'initial', 'help_text', 'error_messages', 'show_hidden_initial', 'disabled'): del kwargs[k] defaults.update(kwargs) if form_class is None: form_class = forms.CharField return form_class(**defaults) def value_from_object(self, obj): """Return the value of this field in the given model instance.""" return getattr(obj, self.attname) class BooleanField(Field): empty_strings_allowed = False default_error_messages = { 'invalid': _('“%(value)s” value must be either True or False.'), 'invalid_nullable': _('“%(value)s” value must be either True, False, or None.'), } description = _("Boolean (Either True or False)") def get_internal_type(self): return "BooleanField" def to_python(self, value): if self.null and value in self.empty_values: return None if value in (True, False): # 1/0 are equal to True/False. bool() converts former to latter. return bool(value) if value in ('t', 'True', '1'): return True if value in ('f', 'False', '0'): return False raise exceptions.ValidationError( self.error_messages['invalid_nullable' if self.null else 'invalid'], code='invalid', params={'value': value}, ) def get_prep_value(self, value): value = super().get_prep_value(value) if value is None: return None return self.to_python(value) def formfield(self, **kwargs): if self.choices is not None: include_blank = not (self.has_default() or 'initial' in kwargs) defaults = {'choices': self.get_choices(include_blank=include_blank)} else: form_class = forms.NullBooleanField if self.null else forms.BooleanField # In HTML checkboxes, 'required' means "must be checked" which is # different from the choices case ("must select some value"). # required=False allows unchecked checkboxes. 
defaults = {'form_class': form_class, 'required': False} return super().formfield(**{**defaults, **kwargs}) class CharField(Field): description = _("String (up to %(max_length)s)") def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.validators.append(validators.MaxLengthValidator(self.max_length)) def check(self, **kwargs): return [ *super().check(**kwargs), *self._check_max_length_attribute(**kwargs), ] def _check_max_length_attribute(self, **kwargs): if self.max_length is None: return [ checks.Error( "CharFields must define a 'max_length' attribute.", obj=self, id='fields.E120', ) ] elif (not isinstance(self.max_length, int) or isinstance(self.max_length, bool) or self.max_length <= 0): return [ checks.Error( "'max_length' must be a positive integer.", obj=self, id='fields.E121', ) ] else: return [] def cast_db_type(self, connection): if self.max_length is None: return connection.ops.cast_char_field_without_max_length return super().cast_db_type(connection) def get_internal_type(self): return "CharField" def to_python(self, value): if isinstance(value, str) or value is None: return value return str(value) def get_prep_value(self, value): value = super().get_prep_value(value) return self.to_python(value) def formfield(self, **kwargs): # Passing max_length to forms.CharField means that the value's length # will be validated twice. This is considered acceptable since we want # the value in the form field (to pass into widget for example). defaults = {'max_length': self.max_length} # TODO: Handle multiple backends with different feature flags. if self.null and not connection.features.interprets_empty_strings_as_nulls: defaults['empty_value'] = None defaults.update(kwargs) return super().formfield(**defaults) class CommaSeparatedIntegerField(CharField): default_validators = [validators.validate_comma_separated_integer_list] description = _("Comma-separated integers") system_check_removed_details = { 'msg': ( 'CommaSeparatedIntegerField is removed except for support in ' 'historical migrations.' ), 'hint': ( 'Use CharField(validators=[validate_comma_separated_integer_list]) ' 'instead.' ), 'id': 'fields.E901', } class DateTimeCheckMixin: def check(self, **kwargs): return [ *super().check(**kwargs), *self._check_mutually_exclusive_options(), *self._check_fix_default_value(), ] def _check_mutually_exclusive_options(self): # auto_now, auto_now_add, and default are mutually exclusive # options. The use of more than one of these options together # will trigger an Error mutually_exclusive_options = [self.auto_now_add, self.auto_now, self.has_default()] enabled_options = [option not in (None, False) for option in mutually_exclusive_options].count(True) if enabled_options > 1: return [ checks.Error( "The options auto_now, auto_now_add, and default " "are mutually exclusive. Only one of these options " "may be present.", obj=self, id='fields.E160', ) ] else: return [] def _check_fix_default_value(self): return [] class DateField(DateTimeCheckMixin, Field): empty_strings_allowed = False default_error_messages = { 'invalid': _('“%(value)s” value has an invalid date format. 
It must be ' 'in YYYY-MM-DD format.'), 'invalid_date': _('“%(value)s” value has the correct format (YYYY-MM-DD) ' 'but it is an invalid date.'), } description = _("Date (without time)") def __init__(self, verbose_name=None, name=None, auto_now=False, auto_now_add=False, **kwargs): self.auto_now, self.auto_now_add = auto_now, auto_now_add if auto_now or auto_now_add: kwargs['editable'] = False kwargs['blank'] = True super().__init__(verbose_name, name, **kwargs) def _check_fix_default_value(self): """ Warn that using an actual date or datetime value is probably wrong; it's only evaluated on server startup. """ if not self.has_default(): return [] now = timezone.now() if not timezone.is_naive(now): now = timezone.make_naive(now, timezone.utc) value = self.default if isinstance(value, datetime.datetime): if not timezone.is_naive(value): value = timezone.make_naive(value, timezone.utc) value = value.date() elif isinstance(value, datetime.date): # Nothing to do, as dates don't have tz information pass else: # No explicit date / datetime value -- no checks necessary return [] offset = datetime.timedelta(days=1) lower = (now - offset).date() upper = (now + offset).date() if lower <= value <= upper: return [ checks.Warning( 'Fixed default value provided.', hint='It seems you set a fixed date / time / datetime ' 'value as default for this field. This may not be ' 'what you want. If you want to have the current date ' 'as default, use `django.utils.timezone.now`', obj=self, id='fields.W161', ) ] return [] def deconstruct(self): name, path, args, kwargs = super().deconstruct() if self.auto_now: kwargs['auto_now'] = True if self.auto_now_add: kwargs['auto_now_add'] = True if self.auto_now or self.auto_now_add: del kwargs['editable'] del kwargs['blank'] return name, path, args, kwargs def get_internal_type(self): return "DateField" def to_python(self, value): if value is None: return value if isinstance(value, datetime.datetime): if settings.USE_TZ and timezone.is_aware(value): # Convert aware datetimes to the default time zone # before casting them to dates (#17742). 
default_timezone = timezone.get_default_timezone() value = timezone.make_naive(value, default_timezone) return value.date() if isinstance(value, datetime.date): return value try: parsed = parse_date(value) if parsed is not None: return parsed except ValueError: raise exceptions.ValidationError( self.error_messages['invalid_date'], code='invalid_date', params={'value': value}, ) raise exceptions.ValidationError( self.error_messages['invalid'], code='invalid', params={'value': value}, ) def pre_save(self, model_instance, add): if self.auto_now or (self.auto_now_add and add): value = datetime.date.today() setattr(model_instance, self.attname, value) return value else: return super().pre_save(model_instance, add) def contribute_to_class(self, cls, name, **kwargs): super().contribute_to_class(cls, name, **kwargs) if not self.null: setattr( cls, 'get_next_by_%s' % self.name, partialmethod(cls._get_next_or_previous_by_FIELD, field=self, is_next=True) ) setattr( cls, 'get_previous_by_%s' % self.name, partialmethod(cls._get_next_or_previous_by_FIELD, field=self, is_next=False) ) def get_prep_value(self, value): value = super().get_prep_value(value) return self.to_python(value) def get_db_prep_value(self, value, connection, prepared=False): # Casts dates into the format expected by the backend if not prepared: value = self.get_prep_value(value) return connection.ops.adapt_datefield_value(value) def value_to_string(self, obj): val = self.value_from_object(obj) return '' if val is None else val.isoformat() def formfield(self, **kwargs): return super().formfield(**{ 'form_class': forms.DateField, **kwargs, }) class DateTimeField(DateField): empty_strings_allowed = False default_error_messages = { 'invalid': _('“%(value)s” value has an invalid format. It must be in ' 'YYYY-MM-DD HH:MM[:ss[.uuuuuu]][TZ] format.'), 'invalid_date': _("“%(value)s” value has the correct format " "(YYYY-MM-DD) but it is an invalid date."), 'invalid_datetime': _('“%(value)s” value has the correct format ' '(YYYY-MM-DD HH:MM[:ss[.uuuuuu]][TZ]) ' 'but it is an invalid date/time.'), } description = _("Date (with time)") # __init__ is inherited from DateField def _check_fix_default_value(self): """ Warn that using an actual date or datetime value is probably wrong; it's only evaluated on server startup. """ if not self.has_default(): return [] now = timezone.now() if not timezone.is_naive(now): now = timezone.make_naive(now, timezone.utc) value = self.default if isinstance(value, datetime.datetime): second_offset = datetime.timedelta(seconds=10) lower = now - second_offset upper = now + second_offset if timezone.is_aware(value): value = timezone.make_naive(value, timezone.utc) elif isinstance(value, datetime.date): second_offset = datetime.timedelta(seconds=10) lower = now - second_offset lower = datetime.datetime(lower.year, lower.month, lower.day) upper = now + second_offset upper = datetime.datetime(upper.year, upper.month, upper.day) value = datetime.datetime(value.year, value.month, value.day) else: # No explicit date / datetime value -- no checks necessary return [] if lower <= value <= upper: return [ checks.Warning( 'Fixed default value provided.', hint='It seems you set a fixed date / time / datetime ' 'value as default for this field. This may not be ' 'what you want. 
If you want to have the current date ' 'as default, use `django.utils.timezone.now`', obj=self, id='fields.W161', ) ] return [] def get_internal_type(self): return "DateTimeField" def to_python(self, value): if value is None: return value if isinstance(value, datetime.datetime): return value if isinstance(value, datetime.date): value = datetime.datetime(value.year, value.month, value.day) if settings.USE_TZ: # For backwards compatibility, interpret naive datetimes in # local time. This won't work during DST change, but we can't # do much about it, so we let the exceptions percolate up the # call stack. warnings.warn("DateTimeField %s.%s received a naive datetime " "(%s) while time zone support is active." % (self.model.__name__, self.name, value), RuntimeWarning) default_timezone = timezone.get_default_timezone() value = timezone.make_aware(value, default_timezone) return value try: parsed = parse_datetime(value) if parsed is not None: return parsed except ValueError: raise exceptions.ValidationError( self.error_messages['invalid_datetime'], code='invalid_datetime', params={'value': value}, ) try: parsed = parse_date(value) if parsed is not None: return datetime.datetime(parsed.year, parsed.month, parsed.day) except ValueError: raise exceptions.ValidationError( self.error_messages['invalid_date'], code='invalid_date', params={'value': value}, ) raise exceptions.ValidationError( self.error_messages['invalid'], code='invalid', params={'value': value}, ) def pre_save(self, model_instance, add): if self.auto_now or (self.auto_now_add and add): value = timezone.now() setattr(model_instance, self.attname, value) return value else: return super().pre_save(model_instance, add) # contribute_to_class is inherited from DateField, it registers # get_next_by_FOO and get_prev_by_FOO def get_prep_value(self, value): value = super().get_prep_value(value) value = self.to_python(value) if value is not None and settings.USE_TZ and timezone.is_naive(value): # For backwards compatibility, interpret naive datetimes in local # time. This won't work during DST change, but we can't do much # about it, so we let the exceptions percolate up the call stack. try: name = '%s.%s' % (self.model.__name__, self.name) except AttributeError: name = '(unbound)' warnings.warn("DateTimeField %s received a naive datetime (%s)" " while time zone support is active." 
% (name, value), RuntimeWarning) default_timezone = timezone.get_default_timezone() value = timezone.make_aware(value, default_timezone) return value def get_db_prep_value(self, value, connection, prepared=False): # Casts datetimes into the format expected by the backend if not prepared: value = self.get_prep_value(value) return connection.ops.adapt_datetimefield_value(value) def value_to_string(self, obj): val = self.value_from_object(obj) return '' if val is None else val.isoformat() def formfield(self, **kwargs): return super().formfield(**{ 'form_class': forms.DateTimeField, **kwargs, }) class DecimalField(Field): empty_strings_allowed = False default_error_messages = { 'invalid': _('“%(value)s” value must be a decimal number.'), } description = _("Decimal number") def __init__(self, verbose_name=None, name=None, max_digits=None, decimal_places=None, **kwargs): self.max_digits, self.decimal_places = max_digits, decimal_places super().__init__(verbose_name, name, **kwargs) def check(self, **kwargs): errors = super().check(**kwargs) digits_errors = [ *self._check_decimal_places(), *self._check_max_digits(), ] if not digits_errors: errors.extend(self._check_decimal_places_and_max_digits(**kwargs)) else: errors.extend(digits_errors) return errors def _check_decimal_places(self): try: decimal_places = int(self.decimal_places) if decimal_places < 0: raise ValueError() except TypeError: return [ checks.Error( "DecimalFields must define a 'decimal_places' attribute.", obj=self, id='fields.E130', ) ] except ValueError: return [ checks.Error( "'decimal_places' must be a non-negative integer.", obj=self, id='fields.E131', ) ] else: return [] def _check_max_digits(self): try: max_digits = int(self.max_digits) if max_digits <= 0: raise ValueError() except TypeError: return [ checks.Error( "DecimalFields must define a 'max_digits' attribute.", obj=self, id='fields.E132', ) ] except ValueError: return [ checks.Error( "'max_digits' must be a positive integer.", obj=self, id='fields.E133', ) ] else: return [] def _check_decimal_places_and_max_digits(self, **kwargs): if int(self.decimal_places) > int(self.max_digits): return [ checks.Error( "'max_digits' must be greater or equal to 'decimal_places'.", obj=self, id='fields.E134', ) ] return [] @cached_property def validators(self): return super().validators + [ validators.DecimalValidator(self.max_digits, self.decimal_places) ] @cached_property def context(self): return decimal.Context(prec=self.max_digits) def deconstruct(self): name, path, args, kwargs = super().deconstruct() if self.max_digits is not None: kwargs['max_digits'] = self.max_digits if self.decimal_places is not None: kwargs['decimal_places'] = self.decimal_places return name, path, args, kwargs def get_internal_type(self): return "DecimalField" def to_python(self, value): if value is None: return value if isinstance(value, float): return self.context.create_decimal_from_float(value) try: return decimal.Decimal(value) except decimal.InvalidOperation: raise exceptions.ValidationError( self.error_messages['invalid'], code='invalid', params={'value': value}, ) def get_db_prep_save(self, value, connection): return connection.ops.adapt_decimalfield_value(self.to_python(value), self.max_digits, self.decimal_places) def get_prep_value(self, value): value = super().get_prep_value(value) return self.to_python(value) def formfield(self, **kwargs): return super().formfield(**{ 'max_digits': self.max_digits, 'decimal_places': self.decimal_places, 'form_class': forms.DecimalField, **kwargs, }) class 
DurationField(Field): """ Store timedelta objects. Use interval on PostgreSQL, INTERVAL DAY TO SECOND on Oracle, and bigint of microseconds on other databases. """ empty_strings_allowed = False default_error_messages = { 'invalid': _('“%(value)s” value has an invalid format. It must be in ' '[DD] [[HH:]MM:]ss[.uuuuuu] format.') } description = _("Duration") def get_internal_type(self): return "DurationField" def to_python(self, value): if value is None: return value if isinstance(value, datetime.timedelta): return value try: parsed = parse_duration(value) except ValueError: pass else: if parsed is not None: return parsed raise exceptions.ValidationError( self.error_messages['invalid'], code='invalid', params={'value': value}, ) def get_db_prep_value(self, value, connection, prepared=False): if connection.features.has_native_duration_field: return value if value is None: return None return duration_microseconds(value) def get_db_converters(self, connection): converters = [] if not connection.features.has_native_duration_field: converters.append(connection.ops.convert_durationfield_value) return converters + super().get_db_converters(connection) def value_to_string(self, obj): val = self.value_from_object(obj) return '' if val is None else duration_string(val) def formfield(self, **kwargs): return super().formfield(**{ 'form_class': forms.DurationField, **kwargs, }) class EmailField(CharField): default_validators = [validators.validate_email] description = _("Email address") def __init__(self, *args, **kwargs): # max_length=254 to be compliant with RFCs 3696 and 5321 kwargs.setdefault('max_length', 254) super().__init__(*args, **kwargs) def deconstruct(self): name, path, args, kwargs = super().deconstruct() # We do not exclude max_length if it matches default as we want to change # the default in future. return name, path, args, kwargs def formfield(self, **kwargs): # As with CharField, this will cause email validation to be performed # twice. 
return super().formfield(**{ 'form_class': forms.EmailField, **kwargs, }) class FilePathField(Field): description = _("File path") def __init__(self, verbose_name=None, name=None, path='', match=None, recursive=False, allow_files=True, allow_folders=False, **kwargs): self.path, self.match, self.recursive = path, match, recursive self.allow_files, self.allow_folders = allow_files, allow_folders kwargs.setdefault('max_length', 100) super().__init__(verbose_name, name, **kwargs) def check(self, **kwargs): return [ *super().check(**kwargs), *self._check_allowing_files_or_folders(**kwargs), ] def _check_allowing_files_or_folders(self, **kwargs): if not self.allow_files and not self.allow_folders: return [ checks.Error( "FilePathFields must have either 'allow_files' or 'allow_folders' set to True.", obj=self, id='fields.E140', ) ] return [] def deconstruct(self): name, path, args, kwargs = super().deconstruct() if self.path != '': kwargs['path'] = self.path if self.match is not None: kwargs['match'] = self.match if self.recursive is not False: kwargs['recursive'] = self.recursive if self.allow_files is not True: kwargs['allow_files'] = self.allow_files if self.allow_folders is not False: kwargs['allow_folders'] = self.allow_folders if kwargs.get("max_length") == 100: del kwargs["max_length"] return name, path, args, kwargs def get_prep_value(self, value): value = super().get_prep_value(value) if value is None: return None return str(value) def formfield(self, **kwargs): return super().formfield(**{ 'path': self.path() if callable(self.path) else self.path, 'match': self.match, 'recursive': self.recursive, 'form_class': forms.FilePathField, 'allow_files': self.allow_files, 'allow_folders': self.allow_folders, **kwargs, }) def get_internal_type(self): return "FilePathField" class FloatField(Field): empty_strings_allowed = False default_error_messages = { 'invalid': _('“%(value)s” value must be a float.'), } description = _("Floating point number") def get_prep_value(self, value): value = super().get_prep_value(value) if value is None: return None try: return float(value) except (TypeError, ValueError) as e: raise e.__class__( "Field '%s' expected a number but got %r." % (self.name, value), ) from e def get_internal_type(self): return "FloatField" def to_python(self, value): if value is None: return value try: return float(value) except (TypeError, ValueError): raise exceptions.ValidationError( self.error_messages['invalid'], code='invalid', params={'value': value}, ) def formfield(self, **kwargs): return super().formfield(**{ 'form_class': forms.FloatField, **kwargs, }) class IntegerField(Field): empty_strings_allowed = False default_error_messages = { 'invalid': _('“%(value)s” value must be an integer.'), } description = _("Integer") def check(self, **kwargs): return [ *super().check(**kwargs), *self._check_max_length_warning(), ] def _check_max_length_warning(self): if self.max_length is not None: return [ checks.Warning( "'max_length' is ignored when used with %s." % self.__class__.__name__, hint="Remove 'max_length' from field", obj=self, id='fields.W122', ) ] return [] @cached_property def validators(self): # These validators can't be added at field initialization time since # they're based on values retrieved from `connection`. 
validators_ = super().validators internal_type = self.get_internal_type() min_value, max_value = connection.ops.integer_field_range(internal_type) if min_value is not None and not any( ( isinstance(validator, validators.MinValueValidator) and ( validator.limit_value() if callable(validator.limit_value) else validator.limit_value ) >= min_value ) for validator in validators_ ): validators_.append(validators.MinValueValidator(min_value)) if max_value is not None and not any( ( isinstance(validator, validators.MaxValueValidator) and ( validator.limit_value() if callable(validator.limit_value) else validator.limit_value ) <= max_value ) for validator in validators_ ): validators_.append(validators.MaxValueValidator(max_value)) return validators_ def get_prep_value(self, value): value = super().get_prep_value(value) if value is None: return None try: return int(value) except (TypeError, ValueError) as e: raise e.__class__( "Field '%s' expected a number but got %r." % (self.name, value), ) from e def get_internal_type(self): return "IntegerField" def to_python(self, value): if value is None: return value try: return int(value) except (TypeError, ValueError): raise exceptions.ValidationError( self.error_messages['invalid'], code='invalid', params={'value': value}, ) def formfield(self, **kwargs): return super().formfield(**{ 'form_class': forms.IntegerField, **kwargs, }) class BigIntegerField(IntegerField): description = _("Big (8 byte) integer") MAX_BIGINT = 9223372036854775807 def get_internal_type(self): return "BigIntegerField" def formfield(self, **kwargs): return super().formfield(**{ 'min_value': -BigIntegerField.MAX_BIGINT - 1, 'max_value': BigIntegerField.MAX_BIGINT, **kwargs, }) class IPAddressField(Field): empty_strings_allowed = False description = _("IPv4 address") system_check_removed_details = { 'msg': ( 'IPAddressField has been removed except for support in ' 'historical migrations.' 
), 'hint': 'Use GenericIPAddressField instead.', 'id': 'fields.E900', } def __init__(self, *args, **kwargs): kwargs['max_length'] = 15 super().__init__(*args, **kwargs) def deconstruct(self): name, path, args, kwargs = super().deconstruct() del kwargs['max_length'] return name, path, args, kwargs def get_prep_value(self, value): value = super().get_prep_value(value) if value is None: return None return str(value) def get_internal_type(self): return "IPAddressField" class GenericIPAddressField(Field): empty_strings_allowed = False description = _("IP address") default_error_messages = {} def __init__(self, verbose_name=None, name=None, protocol='both', unpack_ipv4=False, *args, **kwargs): self.unpack_ipv4 = unpack_ipv4 self.protocol = protocol self.default_validators, invalid_error_message = \ validators.ip_address_validators(protocol, unpack_ipv4) self.default_error_messages['invalid'] = invalid_error_message kwargs['max_length'] = 39 super().__init__(verbose_name, name, *args, **kwargs) def check(self, **kwargs): return [ *super().check(**kwargs), *self._check_blank_and_null_values(**kwargs), ] def _check_blank_and_null_values(self, **kwargs): if not getattr(self, 'null', False) and getattr(self, 'blank', False): return [ checks.Error( 'GenericIPAddressFields cannot have blank=True if null=False, ' 'as blank values are stored as nulls.', obj=self, id='fields.E150', ) ] return [] def deconstruct(self): name, path, args, kwargs = super().deconstruct() if self.unpack_ipv4 is not False: kwargs['unpack_ipv4'] = self.unpack_ipv4 if self.protocol != "both": kwargs['protocol'] = self.protocol if kwargs.get("max_length") == 39: del kwargs['max_length'] return name, path, args, kwargs def get_internal_type(self): return "GenericIPAddressField" def to_python(self, value): if value is None: return None if not isinstance(value, str): value = str(value) value = value.strip() if ':' in value: return clean_ipv6_address(value, self.unpack_ipv4, self.error_messages['invalid']) return value def get_db_prep_value(self, value, connection, prepared=False): if not prepared: value = self.get_prep_value(value) return connection.ops.adapt_ipaddressfield_value(value) def get_prep_value(self, value): value = super().get_prep_value(value) if value is None: return None if value and ':' in value: try: return clean_ipv6_address(value, self.unpack_ipv4) except exceptions.ValidationError: pass return str(value) def formfield(self, **kwargs): return super().formfield(**{ 'protocol': self.protocol, 'form_class': forms.GenericIPAddressField, **kwargs, }) class NullBooleanField(BooleanField): default_error_messages = { 'invalid': _('“%(value)s” value must be either None, True or False.'), 'invalid_nullable': _('“%(value)s” value must be either None, True or False.'), } description = _("Boolean (Either True, False or None)") def __init__(self, *args, **kwargs): kwargs['null'] = True kwargs['blank'] = True super().__init__(*args, **kwargs) def deconstruct(self): name, path, args, kwargs = super().deconstruct() del kwargs['null'] del kwargs['blank'] return name, path, args, kwargs def get_internal_type(self): return "NullBooleanField" class PositiveIntegerRelDbTypeMixin: def rel_db_type(self, connection): """ Return the data type that a related field pointing to this field should use. In most cases, a foreign key pointing to a positive integer primary key will have an integer column data type but some databases (e.g. MySQL) have an unsigned integer type. 
In that case (related_fields_match_type=True), the primary key should return its db_type. """ if connection.features.related_fields_match_type: return self.db_type(connection) else: return IntegerField().db_type(connection=connection) class PositiveBigIntegerField(PositiveIntegerRelDbTypeMixin, IntegerField): description = _('Positive big integer') def get_internal_type(self): return 'PositiveBigIntegerField' def formfield(self, **kwargs): return super().formfield(**{ 'min_value': 0, **kwargs, }) class PositiveIntegerField(PositiveIntegerRelDbTypeMixin, IntegerField): description = _("Positive integer") def get_internal_type(self): return "PositiveIntegerField" def formfield(self, **kwargs): return super().formfield(**{ 'min_value': 0, **kwargs, }) class PositiveSmallIntegerField(PositiveIntegerRelDbTypeMixin, IntegerField): description = _("Positive small integer") def get_internal_type(self): return "PositiveSmallIntegerField" def formfield(self, **kwargs): return super().formfield(**{ 'min_value': 0, **kwargs, }) class SlugField(CharField): default_validators = [validators.validate_slug] description = _("Slug (up to %(max_length)s)") def __init__(self, *args, max_length=50, db_index=True, allow_unicode=False, **kwargs): self.allow_unicode = allow_unicode if self.allow_unicode: self.default_validators = [validators.validate_unicode_slug] super().__init__(*args, max_length=max_length, db_index=db_index, **kwargs) def deconstruct(self): name, path, args, kwargs = super().deconstruct() if kwargs.get("max_length") == 50: del kwargs['max_length'] if self.db_index is False: kwargs['db_index'] = False else: del kwargs['db_index'] if self.allow_unicode is not False: kwargs['allow_unicode'] = self.allow_unicode return name, path, args, kwargs def get_internal_type(self): return "SlugField" def formfield(self, **kwargs): return super().formfield(**{ 'form_class': forms.SlugField, 'allow_unicode': self.allow_unicode, **kwargs, }) class SmallIntegerField(IntegerField): description = _("Small integer") def get_internal_type(self): return "SmallIntegerField" class TextField(Field): description = _("Text") def get_internal_type(self): return "TextField" def to_python(self, value): if isinstance(value, str) or value is None: return value return str(value) def get_prep_value(self, value): value = super().get_prep_value(value) return self.to_python(value) def formfield(self, **kwargs): # Passing max_length to forms.CharField means that the value's length # will be validated twice. This is considered acceptable since we want # the value in the form field (to pass into widget for example). return super().formfield(**{ 'max_length': self.max_length, **({} if self.choices is not None else {'widget': forms.Textarea}), **kwargs, }) class TimeField(DateTimeCheckMixin, Field): empty_strings_allowed = False default_error_messages = { 'invalid': _('“%(value)s” value has an invalid format. It must be in ' 'HH:MM[:ss[.uuuuuu]] format.'), 'invalid_time': _('“%(value)s” value has the correct format ' '(HH:MM[:ss[.uuuuuu]]) but it is an invalid time.'), } description = _("Time") def __init__(self, verbose_name=None, name=None, auto_now=False, auto_now_add=False, **kwargs): self.auto_now, self.auto_now_add = auto_now, auto_now_add if auto_now or auto_now_add: kwargs['editable'] = False kwargs['blank'] = True super().__init__(verbose_name, name, **kwargs) def _check_fix_default_value(self): """ Warn that using an actual date or datetime value is probably wrong; it's only evaluated on server startup. 
""" if not self.has_default(): return [] now = timezone.now() if not timezone.is_naive(now): now = timezone.make_naive(now, timezone.utc) value = self.default if isinstance(value, datetime.datetime): second_offset = datetime.timedelta(seconds=10) lower = now - second_offset upper = now + second_offset if timezone.is_aware(value): value = timezone.make_naive(value, timezone.utc) elif isinstance(value, datetime.time): second_offset = datetime.timedelta(seconds=10) lower = now - second_offset upper = now + second_offset value = datetime.datetime.combine(now.date(), value) if timezone.is_aware(value): value = timezone.make_naive(value, timezone.utc).time() else: # No explicit time / datetime value -- no checks necessary return [] if lower <= value <= upper: return [ checks.Warning( 'Fixed default value provided.', hint='It seems you set a fixed date / time / datetime ' 'value as default for this field. This may not be ' 'what you want. If you want to have the current date ' 'as default, use `django.utils.timezone.now`', obj=self, id='fields.W161', ) ] return [] def deconstruct(self): name, path, args, kwargs = super().deconstruct() if self.auto_now is not False: kwargs["auto_now"] = self.auto_now if self.auto_now_add is not False: kwargs["auto_now_add"] = self.auto_now_add if self.auto_now or self.auto_now_add: del kwargs['blank'] del kwargs['editable'] return name, path, args, kwargs def get_internal_type(self): return "TimeField" def to_python(self, value): if value is None: return None if isinstance(value, datetime.time): return value if isinstance(value, datetime.datetime): # Not usually a good idea to pass in a datetime here (it loses # information), but this can be a side-effect of interacting with a # database backend (e.g. Oracle), so we'll be accommodating. return value.time() try: parsed = parse_time(value) if parsed is not None: return parsed except ValueError: raise exceptions.ValidationError( self.error_messages['invalid_time'], code='invalid_time', params={'value': value}, ) raise exceptions.ValidationError( self.error_messages['invalid'], code='invalid', params={'value': value}, ) def pre_save(self, model_instance, add): if self.auto_now or (self.auto_now_add and add): value = datetime.datetime.now().time() setattr(model_instance, self.attname, value) return value else: return super().pre_save(model_instance, add) def get_prep_value(self, value): value = super().get_prep_value(value) return self.to_python(value) def get_db_prep_value(self, value, connection, prepared=False): # Casts times into the format expected by the backend if not prepared: value = self.get_prep_value(value) return connection.ops.adapt_timefield_value(value) def value_to_string(self, obj): val = self.value_from_object(obj) return '' if val is None else val.isoformat() def formfield(self, **kwargs): return super().formfield(**{ 'form_class': forms.TimeField, **kwargs, }) class URLField(CharField): default_validators = [validators.URLValidator()] description = _("URL") def __init__(self, verbose_name=None, name=None, **kwargs): kwargs.setdefault('max_length', 200) super().__init__(verbose_name, name, **kwargs) def deconstruct(self): name, path, args, kwargs = super().deconstruct() if kwargs.get("max_length") == 200: del kwargs['max_length'] return name, path, args, kwargs def formfield(self, **kwargs): # As with CharField, this will cause URL validation to be performed # twice. 
return super().formfield(**{ 'form_class': forms.URLField, **kwargs, }) class BinaryField(Field): description = _("Raw binary data") empty_values = [None, b''] def __init__(self, *args, **kwargs): kwargs.setdefault('editable', False) super().__init__(*args, **kwargs) if self.max_length is not None: self.validators.append(validators.MaxLengthValidator(self.max_length)) def check(self, **kwargs): return [*super().check(**kwargs), *self._check_str_default_value()] def _check_str_default_value(self): if self.has_default() and isinstance(self.default, str): return [ checks.Error( "BinaryField's default cannot be a string. Use bytes " "content instead.", obj=self, id='fields.E170', ) ] return [] def deconstruct(self): name, path, args, kwargs = super().deconstruct() if self.editable: kwargs['editable'] = True else: del kwargs['editable'] return name, path, args, kwargs def get_internal_type(self): return "BinaryField" def get_placeholder(self, value, compiler, connection): return connection.ops.binary_placeholder_sql(value) def get_default(self): if self.has_default() and not callable(self.default): return self.default default = super().get_default() if default == '': return b'' return default def get_db_prep_value(self, value, connection, prepared=False): value = super().get_db_prep_value(value, connection, prepared) if value is not None: return connection.Database.Binary(value) return value def value_to_string(self, obj): """Binary data is serialized as base64""" return b64encode(self.value_from_object(obj)).decode('ascii') def to_python(self, value): # If it's a string, it should be base64-encoded data if isinstance(value, str): return memoryview(b64decode(value.encode('ascii'))) return value class UUIDField(Field): default_error_messages = { 'invalid': _('“%(value)s” is not a valid UUID.'), } description = _('Universally unique identifier') empty_strings_allowed = False def __init__(self, verbose_name=None, **kwargs): kwargs['max_length'] = 32 super().__init__(verbose_name, **kwargs) def deconstruct(self): name, path, args, kwargs = super().deconstruct() del kwargs['max_length'] return name, path, args, kwargs def get_internal_type(self): return "UUIDField" def get_prep_value(self, value): value = super().get_prep_value(value) return self.to_python(value) def get_db_prep_value(self, value, connection, prepared=False): if value is None: return None if not isinstance(value, uuid.UUID): value = self.to_python(value) if connection.features.has_native_uuid_field: return value return value.hex def to_python(self, value): if value is not None and not isinstance(value, uuid.UUID): input_form = 'int' if isinstance(value, int) else 'hex' try: return uuid.UUID(**{input_form: value}) except (AttributeError, ValueError): raise exceptions.ValidationError( self.error_messages['invalid'], code='invalid', params={'value': value}, ) return value def formfield(self, **kwargs): return super().formfield(**{ 'form_class': forms.UUIDField, **kwargs, }) class AutoFieldMixin: db_returning = True def __init__(self, *args, **kwargs): kwargs['blank'] = True super().__init__(*args, **kwargs) def check(self, **kwargs): return [ *super().check(**kwargs), *self._check_primary_key(), ] def _check_primary_key(self): if not self.primary_key: return [ checks.Error( 'AutoFields must set primary_key=True.', obj=self, id='fields.E100', ), ] else: return [] def deconstruct(self): name, path, args, kwargs = super().deconstruct() del kwargs['blank'] kwargs['primary_key'] = True return name, path, args, kwargs def validate(self, 
value, model_instance): pass def get_db_prep_value(self, value, connection, prepared=False): if not prepared: value = self.get_prep_value(value) value = connection.ops.validate_autopk_value(value) return value def contribute_to_class(self, cls, name, **kwargs): assert not cls._meta.auto_field, ( "Model %s can't have more than one auto-generated field." % cls._meta.label ) super().contribute_to_class(cls, name, **kwargs) cls._meta.auto_field = self def formfield(self, **kwargs): return None class AutoFieldMeta(type): """ Metaclass to maintain backward inheritance compatibility for AutoField. It is intended that AutoFieldMixin become public API when it is possible to create a non-integer automatically-generated field using column defaults stored in the database. In many areas Django also relies on using isinstance() to check for an automatically-generated field as a subclass of AutoField. A new flag needs to be implemented on Field to be used instead. When these issues have been addressed, this metaclass could be used to deprecate inheritance from AutoField and use of isinstance() with AutoField for detecting automatically-generated fields. """ @property def _subclasses(self): return (BigAutoField, SmallAutoField) def __instancecheck__(self, instance): return isinstance(instance, self._subclasses) or super().__instancecheck__(instance) def __subclasscheck__(self, subclass): return subclass in self._subclasses or super().__subclasscheck__(subclass) class AutoField(AutoFieldMixin, IntegerField, metaclass=AutoFieldMeta): def get_internal_type(self): return 'AutoField' def rel_db_type(self, connection): return IntegerField().db_type(connection=connection) class BigAutoField(AutoFieldMixin, BigIntegerField): def get_internal_type(self): return 'BigAutoField' def rel_db_type(self, connection): return BigIntegerField().db_type(connection=connection) class SmallAutoField(AutoFieldMixin, SmallIntegerField): def get_internal_type(self): return 'SmallAutoField' def rel_db_type(self, connection): return SmallIntegerField().db_type(connection=connection)
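# An illustrative sketch, not part of Django itself: the db_type() and
# get_internal_type() docstrings above describe two ways to map a custom
# field onto a database column type. A minimal custom field that names its
# column type directly might look like the following; ``HandField`` and the
# fixed char(104) column are hypothetical examples chosen for illustration.
class HandField(Field):
    """Store a bridge hand as a fixed-width string of 104 characters."""

    def __init__(self, *args, **kwargs):
        kwargs['max_length'] = 104
        super().__init__(*args, **kwargs)

    def db_type(self, connection):
        # Bypass the get_internal_type()/data_types mapping and specify
        # the column type explicitly, as the db_type() docstring allows.
        return 'char(104)'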
f49f073e13a9b88fd5e16343f3f8e5d7bfd31b4d0c36e6661e2c5c78e89f0919
import datetime import posixpath from django import forms from django.core import checks from django.core.files.base import File from django.core.files.images import ImageFile from django.core.files.storage import default_storage from django.db.models import signals from django.db.models.fields import Field from django.utils.translation import gettext_lazy as _ class FieldFile(File): def __init__(self, instance, field, name): super().__init__(None, name) self.instance = instance self.field = field self.storage = field.storage self._committed = True def __eq__(self, other): # Older code may be expecting FileField values to be simple strings. # By overriding the == operator, it can remain backwards compatible. if hasattr(other, 'name'): return self.name == other.name return self.name == other def __hash__(self): return hash(self.name) # The standard File contains most of the necessary properties, but # FieldFiles can be instantiated without a name, so that needs to # be checked for here. def _require_file(self): if not self: raise ValueError("The '%s' attribute has no file associated with it." % self.field.name) def _get_file(self): self._require_file() if getattr(self, '_file', None) is None: self._file = self.storage.open(self.name, 'rb') return self._file def _set_file(self, file): self._file = file def _del_file(self): del self._file file = property(_get_file, _set_file, _del_file) @property def path(self): self._require_file() return self.storage.path(self.name) @property def url(self): self._require_file() return self.storage.url(self.name) @property def size(self): self._require_file() if not self._committed: return self.file.size return self.storage.size(self.name) def open(self, mode='rb'): self._require_file() if getattr(self, '_file', None) is None: self.file = self.storage.open(self.name, mode) else: self.file.open(mode) return self # open() doesn't alter the file's contents, but it does reset the pointer open.alters_data = True # In addition to the standard File API, FieldFiles have extra methods # to further manipulate the underlying file, as well as update the # associated model instance. def save(self, name, content, save=True): name = self.field.generate_filename(self.instance, name) self.name = self.storage.save(name, content, max_length=self.field.max_length) setattr(self.instance, self.field.name, self.name) self._committed = True # Save the object because it has changed, unless save is False if save: self.instance.save() save.alters_data = True def delete(self, save=True): if not self: return # Only close the file if it's already open, which we know by the # presence of self._file if hasattr(self, '_file'): self.close() del self.file self.storage.delete(self.name) self.name = None setattr(self.instance, self.field.name, self.name) self._committed = False if save: self.instance.save() delete.alters_data = True @property def closed(self): file = getattr(self, '_file', None) return file is None or file.closed def close(self): file = getattr(self, '_file', None) if file is not None: file.close() def __getstate__(self): # FieldFile needs access to its associated model field, an instance and # the file's name. Everything else will be restored later, by # FileDescriptor below.
return { 'name': self.name, 'closed': False, '_committed': True, '_file': None, 'instance': self.instance, 'field': self.field, } def __setstate__(self, state): self.__dict__.update(state) self.storage = self.field.storage class FileDescriptor: """ The descriptor for the file attribute on the model instance. Return a FieldFile when accessed so you can write code like:: >>> from myapp.models import MyModel >>> instance = MyModel.objects.get(pk=1) >>> instance.file.size Assign a file object on assignment so you can do:: >>> with open('/path/to/hello.world') as f: ... instance.file = File(f) """ def __init__(self, field): self.field = field def __get__(self, instance, cls=None): if instance is None: return self # This is slightly complicated, so worth an explanation. # `instance.file` needs to ultimately return some instance of `File`, # probably a subclass. Additionally, this returned object needs to have # the FieldFile API so that users can easily do things like # instance.file.path and have that delegated to the file storage engine. # Easy enough if we're strict about assignment in __set__, but if you # peek below you can see that we're not. So depending on the current # value of the field we have to dynamically construct some sort of # "thing" to return. # The instance dict contains whatever was originally assigned # in __set__. if self.field.name in instance.__dict__: file = instance.__dict__[self.field.name] else: instance.refresh_from_db(fields=[self.field.name]) file = getattr(instance, self.field.name) # If this value is a string (instance.file = "path/to/file") or None # then we simply wrap it with the appropriate attribute class according # to the file field. [This is FieldFile for FileFields and # ImageFieldFile for ImageFields; it's also conceivable that user # subclasses might also want to subclass the attribute class]. This # object understands how to convert a path to a file, and also how to # handle None. if isinstance(file, str) or file is None: attr = self.field.attr_class(instance, self.field, file) instance.__dict__[self.field.name] = attr # Other types of files may be assigned as well, but they need to have # the FieldFile interface added to them. Thus, we wrap any other type of # File inside a FieldFile (well, the field's attr_class, which is # usually FieldFile). elif isinstance(file, File) and not isinstance(file, FieldFile): file_copy = self.field.attr_class(instance, self.field, file.name) file_copy.file = file file_copy._committed = False instance.__dict__[self.field.name] = file_copy # Finally, because of the (some would say boneheaded) way pickle works, # the underlying FieldFile might not actually itself have an associated # file. So we need to reset the details of the FieldFile in those cases. elif isinstance(file, FieldFile) and not hasattr(file, 'field'): file.instance = instance file.field = self.field file.storage = self.field.storage # Make sure that the instance is correct. elif isinstance(file, FieldFile) and instance is not file.instance: file.instance = instance # That was fun, wasn't it? return instance.__dict__[self.field.name] def __set__(self, instance, value): instance.__dict__[self.field.name] = value class FileField(Field): # The class to wrap instance attributes in. Accessing the file object off # the instance will always return an instance of attr_class. attr_class = FieldFile # The descriptor to use for accessing the attribute off of the class.
descriptor_class = FileDescriptor description = _("File") def __init__(self, verbose_name=None, name=None, upload_to='', storage=None, **kwargs): self._primary_key_set_explicitly = 'primary_key' in kwargs self.storage = storage or default_storage self.upload_to = upload_to kwargs.setdefault('max_length', 100) super().__init__(verbose_name, name, **kwargs) def check(self, **kwargs): return [ *super().check(**kwargs), *self._check_primary_key(), *self._check_upload_to(), ] def _check_primary_key(self): if self._primary_key_set_explicitly: return [ checks.Error( "'primary_key' is not a valid argument for a %s." % self.__class__.__name__, obj=self, id='fields.E201', ) ] else: return [] def _check_upload_to(self): if isinstance(self.upload_to, str) and self.upload_to.startswith('/'): return [ checks.Error( "%s's 'upload_to' argument must be a relative path, not an " "absolute path." % self.__class__.__name__, obj=self, id='fields.E202', hint='Remove the leading slash.', ) ] else: return [] def deconstruct(self): name, path, args, kwargs = super().deconstruct() if kwargs.get("max_length") == 100: del kwargs["max_length"] kwargs['upload_to'] = self.upload_to if self.storage is not default_storage: kwargs['storage'] = self.storage return name, path, args, kwargs def get_internal_type(self): return "FileField" def get_prep_value(self, value): value = super().get_prep_value(value) # Need to convert File objects provided via a form to string for database insertion if value is None: return None return str(value) def pre_save(self, model_instance, add): file = super().pre_save(model_instance, add) if file and not file._committed: # Commit the file to storage prior to saving the model file.save(file.name, file.file, save=False) return file def contribute_to_class(self, cls, name, **kwargs): super().contribute_to_class(cls, name, **kwargs) setattr(cls, self.name, self.descriptor_class(self)) def generate_filename(self, instance, filename): """ Apply (if callable) or prepend (if a string) upload_to to the filename, then delegate further processing of the name to the storage backend. Until the storage layer, all file paths are expected to be Unix style (with forward slashes). """ if callable(self.upload_to): filename = self.upload_to(instance, filename) else: dirname = datetime.datetime.now().strftime(str(self.upload_to)) filename = posixpath.join(dirname, filename) return self.storage.generate_filename(filename) def save_form_data(self, instance, data): # Important: None means "no change", other false value means "clear" # This subtle distinction (rather than a more explicit marker) is # needed because we need to consume values that are also sane for a # regular (non Model-) Form to find in its cleaned_data dictionary. if data is not None: # This value will be converted to str and stored in the # database, so leaving False as-is is not acceptable. setattr(instance, self.name, data or '') def formfield(self, **kwargs): return super().formfield(**{ 'form_class': forms.FileField, 'max_length': self.max_length, **kwargs, }) class ImageFileDescriptor(FileDescriptor): """ Just like the FileDescriptor, but for ImageFields. The only difference is assigning the width/height to the width_field/height_field, if appropriate. 
""" def __set__(self, instance, value): previous_file = instance.__dict__.get(self.field.name) super().__set__(instance, value) # To prevent recalculating image dimensions when we are instantiating # an object from the database (bug #11084), only update dimensions if # the field had a value before this assignment. Since the default # value for FileField subclasses is an instance of field.attr_class, # previous_file will only be None when we are called from # Model.__init__(). The ImageField.update_dimension_fields method # hooked up to the post_init signal handles the Model.__init__() cases. # Assignment happening outside of Model.__init__() will trigger the # update right here. if previous_file is not None: self.field.update_dimension_fields(instance, force=True) class ImageFieldFile(ImageFile, FieldFile): def delete(self, save=True): # Clear the image dimensions cache if hasattr(self, '_dimensions_cache'): del self._dimensions_cache super().delete(save) class ImageField(FileField): attr_class = ImageFieldFile descriptor_class = ImageFileDescriptor description = _("Image") def __init__(self, verbose_name=None, name=None, width_field=None, height_field=None, **kwargs): self.width_field, self.height_field = width_field, height_field super().__init__(verbose_name, name, **kwargs) def check(self, **kwargs): return [ *super().check(**kwargs), *self._check_image_library_installed(), ] def _check_image_library_installed(self): try: from PIL import Image # NOQA except ImportError: return [ checks.Error( 'Cannot use ImageField because Pillow is not installed.', hint=('Get Pillow at https://pypi.org/project/Pillow/ ' 'or run command "python -m pip install Pillow".'), obj=self, id='fields.E210', ) ] else: return [] def deconstruct(self): name, path, args, kwargs = super().deconstruct() if self.width_field: kwargs['width_field'] = self.width_field if self.height_field: kwargs['height_field'] = self.height_field return name, path, args, kwargs def contribute_to_class(self, cls, name, **kwargs): super().contribute_to_class(cls, name, **kwargs) # Attach update_dimension_fields so that dimension fields declared # after their corresponding image field don't stay cleared by # Model.__init__, see bug #11196. # Only run post-initialization dimension update on non-abstract models if not cls._meta.abstract: signals.post_init.connect(self.update_dimension_fields, sender=cls) def update_dimension_fields(self, instance, force=False, *args, **kwargs): """ Update field's width and height fields, if defined. This method is hooked up to model's post_init signal to update dimensions after instantiating a model instance. However, dimensions won't be updated if the dimensions fields are already populated. This avoids unnecessary recalculation when loading an object from the database. Dimensions can be forced to update with force=True, which is how ImageFileDescriptor.__set__ calls this method. """ # Nothing to update if the field doesn't have dimension fields or if # the field is deferred. has_dimension_fields = self.width_field or self.height_field if not has_dimension_fields or self.attname not in instance.__dict__: return # getattr will call the ImageFileDescriptor's __get__ method, which # coerces the assigned value into an instance of self.attr_class # (ImageFieldFile in this case). file = getattr(instance, self.attname) # Nothing to update if we have no file and not being forced to update. 
        if not file and not force:
            return

        dimension_fields_filled = not(
            (self.width_field and not getattr(instance, self.width_field)) or
            (self.height_field and not getattr(instance, self.height_field))
        )
        # When both dimension fields have values, we are most likely loading
        # data from the database or updating an image field that already had
        # an image stored. In the first case, we don't want to update the
        # dimension fields because we are already getting their values from the
        # database. In the second case, we do want to update the dimension
        # fields and will skip this return because force will be True since we
        # were called from ImageFileDescriptor.__set__.
        if dimension_fields_filled and not force:
            return

        # file should be an instance of ImageFieldFile or should be None.
        if file:
            width = file.width
            height = file.height
        else:
            # No file, so clear dimension fields.
            width = None
            height = None

        # Update the width and height fields.
        if self.width_field:
            setattr(instance, self.width_field, width)
        if self.height_field:
            setattr(instance, self.height_field, height)

    def formfield(self, **kwargs):
        return super().formfield(**{
            'form_class': forms.ImageField,
            **kwargs,
        })
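# Illustrative usage sketch (not part of Django; the model and helper below
# are hypothetical). A callable upload_to receives (instance, filename) and
# returns the full relative path, while a string upload_to is treated as a
# strftime() pattern for a directory prefix, as generate_filename() above
# shows:
#
#     def avatar_path(instance, filename):
#         return 'avatars/user_%s/%s' % (instance.user_id, filename)
#
#     class Profile(models.Model):
#         avatar = models.ImageField(upload_to=avatar_path)
#         photo = models.FileField(upload_to='photos/%Y/%m/%d')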
4b8ed258588ca7e5a1f1d4f970b09362491828c6a65d044f148a30102edbabf8
""" Create SQL statements for QuerySets. The code in here encapsulates all of the SQL construction so that QuerySets themselves do not have to (and could be backed by things other than SQL databases). The abstraction barrier only works one way: this module has to know all about the internals of models in order to get the information it needs. """ import copy import difflib import functools import inspect import sys import warnings from collections import Counter, namedtuple from collections.abc import Iterator, Mapping from itertools import chain, count, product from string import ascii_uppercase from django.core.exceptions import ( EmptyResultSet, FieldDoesNotExist, FieldError, ) from django.db import DEFAULT_DB_ALIAS, NotSupportedError, connections from django.db.models.aggregates import Count from django.db.models.constants import LOOKUP_SEP from django.db.models.expressions import BaseExpression, Col, F, OuterRef, Ref from django.db.models.fields import Field from django.db.models.fields.related_lookups import MultiColSource from django.db.models.lookups import Lookup from django.db.models.query_utils import ( Q, check_rel_lookup_compatibility, refs_expression, ) from django.db.models.sql.constants import ( INNER, LOUTER, ORDER_DIR, ORDER_PATTERN, SINGLE, ) from django.db.models.sql.datastructures import ( BaseTable, Empty, Join, MultiJoin, ) from django.db.models.sql.where import ( AND, OR, ExtraWhere, NothingNode, WhereNode, ) from django.utils.deprecation import RemovedInDjango40Warning from django.utils.functional import cached_property from django.utils.tree import Node __all__ = ['Query', 'RawQuery'] def get_field_names_from_opts(opts): return set(chain.from_iterable( (f.name, f.attname) if f.concrete else (f.name,) for f in opts.get_fields() )) def get_children_from_q(q): for child in q.children: if isinstance(child, Node): yield from get_children_from_q(child) else: yield child JoinInfo = namedtuple( 'JoinInfo', ('final_field', 'targets', 'opts', 'joins', 'path', 'transform_function') ) class RawQuery: """A single raw SQL query.""" def __init__(self, sql, using, params=None): self.params = params or () self.sql = sql self.using = using self.cursor = None # Mirror some properties of a normal query so that # the compiler can be used to process results. self.low_mark, self.high_mark = 0, None # Used for offset/limit self.extra_select = {} self.annotation_select = {} def chain(self, using): return self.clone(using) def clone(self, using): return RawQuery(self.sql, using, params=self.params) def get_columns(self): if self.cursor is None: self._execute_query() converter = connections[self.using].introspection.identifier_converter return [converter(column_meta[0]) for column_meta in self.cursor.description] def __iter__(self): # Always execute a new query for a new iterator. # This could be optimized with a cache at the expense of RAM. self._execute_query() if not connections[self.using].features.can_use_chunked_reads: # If the database can't use chunked reads we need to make sure we # evaluate the entire query up front. result = list(self.cursor) else: result = self.cursor return iter(result) def __repr__(self): return "<%s: %s>" % (self.__class__.__name__, self) @property def params_type(self): return dict if isinstance(self.params, Mapping) else tuple def __str__(self): return self.sql % self.params_type(self.params) def _execute_query(self): connection = connections[self.using] # Adapt parameters to the database, as much as possible considering # that the target type isn't known. 
# See #17755.
        params_type = self.params_type
        adapter = connection.ops.adapt_unknown_value
        if params_type is tuple:
            params = tuple(adapter(val) for val in self.params)
        elif params_type is dict:
            params = {key: adapter(val) for key, val in self.params.items()}
        else:
            raise RuntimeError("Unexpected params type: %s" % params_type)

        self.cursor = connection.cursor()
        self.cursor.execute(self.sql, params)


class Query(BaseExpression):
    """A single SQL query."""

    alias_prefix = 'T'
    subq_aliases = frozenset([alias_prefix])

    compiler = 'SQLCompiler'

    def __init__(self, model, where=WhereNode, alias_cols=True):
        self.model = model
        self.alias_refcount = {}
        # alias_map is the most important data structure regarding joins.
        # It's used for recording which joins exist in the query and what
        # types they are. The key is the alias of the joined table (possibly
        # the table name) and the value is a Join-like object (see
        # sql.datastructures.Join for more information).
        self.alias_map = {}
        # Whether to provide alias to columns during reference resolving.
        self.alias_cols = alias_cols
        # Sometimes the query contains references to aliases in outer queries (as
        # a result of split_exclude). Correct alias quoting needs to know these
        # aliases too.
        # Map external tables to whether they are aliased.
        self.external_aliases = {}
        self.table_map = {}     # Maps table names to list of aliases.
        self.default_cols = True
        self.default_ordering = True
        self.standard_ordering = True
        self.used_aliases = set()
        self.filter_is_sticky = False
        self.subquery = False

        # SQL-related attributes
        # Select and related select clauses are expressions to use in the
        # SELECT clause of the query.
        # The select is used for cases where we want to set up the select
        # clause to contain other than default fields (values(), subqueries...)
        # Note that annotations go to annotations dictionary.
        self.select = ()
        self.where = where()
        self.where_class = where
        # The group_by attribute can have one of the following forms:
        #  - None: no group by at all in the query
        #  - A tuple of expressions: group by (at least) those expressions.
        #    String refs are also allowed for now.
        #  - True: group by all select fields of the model
        # See compiler.get_group_by() for details.
        self.group_by = None
        self.order_by = ()
        self.low_mark, self.high_mark = 0, None  # Used for offset/limit
        self.distinct = False
        self.distinct_fields = ()
        self.select_for_update = False
        self.select_for_update_nowait = False
        self.select_for_update_skip_locked = False
        self.select_for_update_of = ()

        self.select_related = False
        # Arbitrary limit for select_related to prevent infinite recursion.
        self.max_depth = 5

        # Holds the selects defined by a call to values() or values_list()
        # excluding annotation_select and extra_select.
        self.values_select = ()

        # SQL annotation-related attributes
        self.annotations = {}  # Maps alias -> Annotation Expression
        self.annotation_select_mask = None
        self._annotation_select_cache = None

        # Set combination attributes
        self.combinator = None
        self.combinator_all = False
        self.combined_queries = ()

        # These are for extensions. The contents are more or less appended
        # verbatim to the appropriate clause.
        self.extra = {}  # Maps col_alias -> (col_sql, params).
        self.extra_select_mask = None
        self._extra_select_cache = None

        self.extra_tables = ()
        self.extra_order_by = ()

        # A tuple that is a set of model field names and either True, if these
        # are the fields to defer, or False if these are the only fields to
        # load.
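        # Illustrative example (not from the source): qs.defer('name') stores
        # ({'name'}, True) here, while qs.only('name') stores
        # ({'name'}, False).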
        self.deferred_loading = (frozenset(), True)

        self._filtered_relations = {}

        self.explain_query = False
        self.explain_format = None
        self.explain_options = {}

    @property
    def output_field(self):
        if len(self.select) == 1:
            return self.select[0].field
        elif len(self.annotation_select) == 1:
            return next(iter(self.annotation_select.values())).output_field

    @property
    def has_select_fields(self):
        return bool(self.select or self.annotation_select_mask or self.extra_select_mask)

    @cached_property
    def base_table(self):
        for alias in self.alias_map:
            return alias

    def __str__(self):
        """
        Return the query as a string of SQL with the parameter values
        substituted in (use sql_with_params() to see the unsubstituted string).

        Parameter values won't necessarily be quoted correctly, since that is
        done by the database interface at execution time.
        """
        sql, params = self.sql_with_params()
        return sql % params

    def sql_with_params(self):
        """
        Return the query as an SQL string and the parameters that will be
        substituted into the query.
        """
        return self.get_compiler(DEFAULT_DB_ALIAS).as_sql()

    def __deepcopy__(self, memo):
        """Limit the amount of work when a Query is deepcopied."""
        result = self.clone()
        memo[id(self)] = result
        return result

    def get_compiler(self, using=None, connection=None):
        if using is None and connection is None:
            raise ValueError("Need either using or connection")
        if using:
            connection = connections[using]
        return connection.ops.compiler(self.compiler)(self, connection, using)

    def get_meta(self):
        """
        Return the Options instance (the model._meta) from which to start
        processing. Normally, this is self.model._meta, but it can be changed
        by subclasses.
        """
        return self.model._meta

    def clone(self):
        """
        Return a copy of the current Query. A lightweight alternative to
        deepcopy().
        """
        obj = Empty()
        obj.__class__ = self.__class__
        # Copy references to everything.
        obj.__dict__ = self.__dict__.copy()
        # Clone attributes that can't use shallow copy.
        obj.alias_refcount = self.alias_refcount.copy()
        obj.alias_map = self.alias_map.copy()
        obj.external_aliases = self.external_aliases.copy()
        obj.table_map = self.table_map.copy()
        obj.where = self.where.clone()
        obj.annotations = self.annotations.copy()
        if self.annotation_select_mask is None:
            obj.annotation_select_mask = None
        else:
            obj.annotation_select_mask = self.annotation_select_mask.copy()
        # _annotation_select_cache cannot be copied, as doing so breaks the
        # (necessary) state in which both annotations and
        # _annotation_select_cache point to the same underlying objects.
        # It will get re-populated in the cloned queryset the next time it's
        # used.
        obj._annotation_select_cache = None
        obj.extra = self.extra.copy()
        if self.extra_select_mask is None:
            obj.extra_select_mask = None
        else:
            obj.extra_select_mask = self.extra_select_mask.copy()
        if self._extra_select_cache is None:
            obj._extra_select_cache = None
        else:
            obj._extra_select_cache = self._extra_select_cache.copy()
        if self.select_related is not False:
            # Use deepcopy because select_related stores fields in nested
            # dicts.
            obj.select_related = copy.deepcopy(obj.select_related)
        if 'subq_aliases' in self.__dict__:
            obj.subq_aliases = self.subq_aliases.copy()
        obj.used_aliases = self.used_aliases.copy()
        obj._filtered_relations = self._filtered_relations.copy()
        # Clear the cached_property
        try:
            del obj.base_table
        except AttributeError:
            pass
        return obj

    def chain(self, klass=None):
        """
        Return a copy of the current Query that's ready for another operation.
        The klass argument changes the type of the Query, e.g. UpdateQuery.
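        For example (illustrative), the UPDATE path does roughly
        ``query.chain(sql.UpdateQuery)`` to get a copy of the current query
        re-typed for building an UPDATE statement.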
""" obj = self.clone() if klass and obj.__class__ != klass: obj.__class__ = klass if not obj.filter_is_sticky: obj.used_aliases = set() obj.filter_is_sticky = False if hasattr(obj, '_setup_query'): obj._setup_query() return obj def relabeled_clone(self, change_map): clone = self.clone() clone.change_aliases(change_map) return clone def _get_col(self, target, field, alias): if not self.alias_cols: alias = None return target.get_col(alias, field) def rewrite_cols(self, annotation, col_cnt): # We must make sure the inner query has the referred columns in it. # If we are aggregating over an annotation, then Django uses Ref() # instances to note this. However, if we are annotating over a column # of a related model, then it might be that column isn't part of the # SELECT clause of the inner query, and we must manually make sure # the column is selected. An example case is: # .aggregate(Sum('author__awards')) # Resolving this expression results in a join to author, but there # is no guarantee the awards column of author is in the select clause # of the query. Thus we must manually add the column to the inner # query. orig_exprs = annotation.get_source_expressions() new_exprs = [] for expr in orig_exprs: # FIXME: These conditions are fairly arbitrary. Identify a better # method of having expressions decide which code path they should # take. if isinstance(expr, Ref): # Its already a Ref to subquery (see resolve_ref() for # details) new_exprs.append(expr) elif isinstance(expr, (WhereNode, Lookup)): # Decompose the subexpressions further. The code here is # copied from the else clause, but this condition must appear # before the contains_aggregate/is_summary condition below. new_expr, col_cnt = self.rewrite_cols(expr, col_cnt) new_exprs.append(new_expr) else: # Reuse aliases of expressions already selected in subquery. for col_alias, selected_annotation in self.annotation_select.items(): if selected_annotation == expr: new_expr = Ref(col_alias, expr) break else: # An expression that is not selected the subquery. if isinstance(expr, Col) or (expr.contains_aggregate and not expr.is_summary): # Reference column or another aggregate. Select it # under a non-conflicting alias. col_cnt += 1 col_alias = '__col%d' % col_cnt self.annotations[col_alias] = expr self.append_annotation_mask([col_alias]) new_expr = Ref(col_alias, expr) else: # Some other expression not referencing database values # directly. Its subexpression might contain Cols. new_expr, col_cnt = self.rewrite_cols(expr, col_cnt) new_exprs.append(new_expr) annotation.set_source_expressions(new_exprs) return annotation, col_cnt def get_aggregation(self, using, added_aggregate_names): """ Return the dictionary with the values of the existing aggregations. """ if not self.annotation_select: return {} existing_annotations = [ annotation for alias, annotation in self.annotations.items() if alias not in added_aggregate_names ] # Decide if we need to use a subquery. # # Existing annotations would cause incorrect results as get_aggregation() # must produce just one result and thus must not use GROUP BY. But we # aren't smart enough to remove the existing annotations from the # query, so those would force us to use GROUP BY. # # If the query has limit or distinct, or uses set operations, then # those operations must be done in a subquery so that the query # aggregates on the limit and/or distinct results instead of applying # the distinct and limit after the aggregation. 
        if (isinstance(self.group_by, tuple) or self.is_sliced or existing_annotations or
                self.distinct or self.combinator):
            from django.db.models.sql.subqueries import AggregateQuery
            outer_query = AggregateQuery(self.model)
            inner_query = self.clone()
            inner_query.select_for_update = False
            inner_query.select_related = False
            inner_query.set_annotation_mask(self.annotation_select)
            if not self.is_sliced and not self.distinct_fields:
                # Queries with distinct_fields need ordering and when a limit
                # is applied we must take the slice from the ordered query.
                # Otherwise no need for ordering.
                inner_query.clear_ordering(True)
            if not inner_query.distinct:
                # If the inner query uses default select and it has some
                # aggregate annotations, then we must make sure the inner
                # query is grouped by the main model's primary key. However,
                # clearing the select clause can alter results if distinct is
                # used.
                has_existing_aggregate_annotations = any(
                    annotation for annotation in existing_annotations
                    if getattr(annotation, 'contains_aggregate', True)
                )
                if inner_query.default_cols and has_existing_aggregate_annotations:
                    inner_query.group_by = (self.model._meta.pk.get_col(inner_query.get_initial_alias()),)
                inner_query.default_cols = False

            relabels = {t: 'subquery' for t in inner_query.alias_map}
            relabels[None] = 'subquery'
            # Remove any aggregates marked for reduction from the subquery
            # and move them to the outer AggregateQuery.
            col_cnt = 0
            for alias, expression in list(inner_query.annotation_select.items()):
                annotation_select_mask = inner_query.annotation_select_mask
                if expression.is_summary:
                    expression, col_cnt = inner_query.rewrite_cols(expression, col_cnt)
                    outer_query.annotations[alias] = expression.relabeled_clone(relabels)
                    del inner_query.annotations[alias]
                    annotation_select_mask.remove(alias)
                # Make sure the annotation_select won't use cached results.
                inner_query.set_annotation_mask(inner_query.annotation_select_mask)
            if inner_query.select == () and not inner_query.default_cols and not inner_query.annotation_select_mask:
                # In case of Model.objects[0:3].count(), there would be no
                # field selected in the inner query, yet we must use a subquery.
                # So, make sure at least one field is selected.
                inner_query.select = (self.model._meta.pk.get_col(inner_query.get_initial_alias()),)
            try:
                outer_query.add_subquery(inner_query, using)
            except EmptyResultSet:
                return {
                    alias: None
                    for alias in outer_query.annotation_select
                }
        else:
            outer_query = self
            self.select = ()
            self.default_cols = False
            self.extra = {}

        outer_query.clear_ordering(True)
        outer_query.clear_limits()
        outer_query.select_for_update = False
        outer_query.select_related = False
        compiler = outer_query.get_compiler(using)
        result = compiler.execute_sql(SINGLE)
        if result is None:
            result = [None] * len(outer_query.annotation_select)

        converters = compiler.get_converters(outer_query.annotation_select.values())
        result = next(compiler.apply_converters((result,), converters))

        return dict(zip(outer_query.annotation_select, result))

    def get_count(self, using):
        """
        Perform a COUNT() query using the current filter constraints.
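        Illustratively, ``qs.count()`` ends up here: the query is cloned, a
        ``Count('*')`` annotation is added under the ``__count`` alias, and
        the single aggregated value is returned (0 when the result is NULL).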
""" obj = self.clone() obj.add_annotation(Count('*'), alias='__count', is_summary=True) number = obj.get_aggregation(using, ['__count'])['__count'] if number is None: number = 0 return number def has_filters(self): return self.where def has_results(self, using): q = self.clone() if not q.distinct: if q.group_by is True: q.add_fields((f.attname for f in self.model._meta.concrete_fields), False) # Disable GROUP BY aliases to avoid orphaning references to the # SELECT clause which is about to be cleared. q.set_group_by(allow_aliases=False) q.clear_select_clause() q.clear_ordering(True) q.set_limits(high=1) compiler = q.get_compiler(using=using) return compiler.has_results() def explain(self, using, format=None, **options): q = self.clone() q.explain_query = True q.explain_format = format q.explain_options = options compiler = q.get_compiler(using=using) return '\n'.join(compiler.explain_query()) def combine(self, rhs, connector): """ Merge the 'rhs' query into the current one (with any 'rhs' effects being applied *after* (that is, "to the right of") anything in the current query. 'rhs' is not modified during a call to this function. The 'connector' parameter describes how to connect filters from the 'rhs' query. """ assert self.model == rhs.model, \ "Cannot combine queries on two different base models." assert not self.is_sliced, \ "Cannot combine queries once a slice has been taken." assert self.distinct == rhs.distinct, \ "Cannot combine a unique query with a non-unique query." assert self.distinct_fields == rhs.distinct_fields, \ "Cannot combine queries with different distinct fields." # Work out how to relabel the rhs aliases, if necessary. change_map = {} conjunction = (connector == AND) # Determine which existing joins can be reused. When combining the # query with AND we must recreate all joins for m2m filters. When # combining with OR we can reuse joins. The reason is that in AND # case a single row can't fulfill a condition like: # revrel__col=1 & revrel__col=2 # But, there might be two different related rows matching this # condition. In OR case a single True is enough, so single row is # enough, too. # # Note that we will be creating duplicate joins for non-m2m joins in # the AND case. The results will be correct but this creates too many # joins. This is something that could be fixed later on. reuse = set() if conjunction else set(self.alias_map) # Base table must be present in the query - this is the same # table on both sides. self.get_initial_alias() joinpromoter = JoinPromoter(connector, 2, False) joinpromoter.add_votes( j for j in self.alias_map if self.alias_map[j].join_type == INNER) rhs_votes = set() # Now, add the joins from rhs query into the new query (skipping base # table). rhs_tables = list(rhs.alias_map)[1:] for alias in rhs_tables: join = rhs.alias_map[alias] # If the left side of the join was already relabeled, use the # updated alias. join = join.relabeled_clone(change_map) new_alias = self.join(join, reuse=reuse) if join.join_type == INNER: rhs_votes.add(new_alias) # We can't reuse the same join again in the query. If we have two # distinct joins for the same connection in rhs query, then the # combined query must have two joins, too. reuse.discard(new_alias) if alias != new_alias: change_map[alias] = new_alias if not rhs.alias_refcount[alias]: # The alias was unused in the rhs query. Unref it so that it # will be unused in the new query, too. We have to add and # unref the alias so that join promotion has information of # the join type for the unused alias. 
self.unref_alias(new_alias) joinpromoter.add_votes(rhs_votes) joinpromoter.update_join_types(self) # Now relabel a copy of the rhs where-clause and add it to the current # one. w = rhs.where.clone() w.relabel_aliases(change_map) self.where.add(w, connector) # Selection columns and extra extensions are those provided by 'rhs'. if rhs.select: self.set_select([col.relabeled_clone(change_map) for col in rhs.select]) else: self.select = () if connector == OR: # It would be nice to be able to handle this, but the queries don't # really make sense (or return consistent value sets). Not worth # the extra complexity when you can write a real query instead. if self.extra and rhs.extra: raise ValueError("When merging querysets using 'or', you cannot have extra(select=...) on both sides.") self.extra.update(rhs.extra) extra_select_mask = set() if self.extra_select_mask is not None: extra_select_mask.update(self.extra_select_mask) if rhs.extra_select_mask is not None: extra_select_mask.update(rhs.extra_select_mask) if extra_select_mask: self.set_extra_mask(extra_select_mask) self.extra_tables += rhs.extra_tables # Ordering uses the 'rhs' ordering, unless it has none, in which case # the current ordering is used. self.order_by = rhs.order_by or self.order_by self.extra_order_by = rhs.extra_order_by or self.extra_order_by def deferred_to_data(self, target, callback): """ Convert the self.deferred_loading data structure to an alternate data structure, describing the field that *will* be loaded. This is used to compute the columns to select from the database and also by the QuerySet class to work out which fields are being initialized on each model. Models that have all their fields included aren't mentioned in the result, only those that have field restrictions in place. The "target" parameter is the instance that is populated (in place). The "callback" is a function that is called whenever a (model, field) pair need to be added to "target". It accepts three parameters: "target", and the model and list of fields being added for that model. """ field_names, defer = self.deferred_loading if not field_names: return orig_opts = self.get_meta() seen = {} must_include = {orig_opts.concrete_model: {orig_opts.pk}} for field_name in field_names: parts = field_name.split(LOOKUP_SEP) cur_model = self.model._meta.concrete_model opts = orig_opts for name in parts[:-1]: old_model = cur_model if name in self._filtered_relations: name = self._filtered_relations[name].relation_name source = opts.get_field(name) if is_reverse_o2o(source): cur_model = source.related_model else: cur_model = source.remote_field.model opts = cur_model._meta # Even if we're "just passing through" this model, we must add # both the current model's pk and the related reference field # (if it's not a reverse relation) to the things we select. if not is_reverse_o2o(source): must_include[old_model].add(source) add_to_dict(must_include, cur_model, opts.pk) field = opts.get_field(parts[-1]) is_reverse_object = field.auto_created and not field.concrete model = field.related_model if is_reverse_object else field.model model = model._meta.concrete_model if model == opts.model: model = cur_model if not is_reverse_o2o(field): add_to_dict(seen, model, field) if defer: # We need to load all fields for each model, except those that # appear in "seen" (for all models that appear in "seen"). The only # slight complexity here is handling fields that exist on parent # models. 
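            # Illustrative example (hypothetical model): for
            # Author.objects.defer('biography'), "seen" maps Author to
            # {biography}, so the workset built below collects every other
            # local field of Author as the columns to load.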
            workset = {}
            for model, values in seen.items():
                for field in model._meta.local_fields:
                    if field not in values:
                        m = field.model._meta.concrete_model
                        add_to_dict(workset, m, field)
            for model, values in must_include.items():
                # If we haven't included a model in workset, we don't add the
                # corresponding must_include fields for that model, since an
                # empty set means "include all fields". That's why there's no
                # "else" branch here.
                if model in workset:
                    workset[model].update(values)
            for model, values in workset.items():
                callback(target, model, values)
        else:
            for model, values in must_include.items():
                if model in seen:
                    seen[model].update(values)
                else:
                    # As we've passed through this model, but not explicitly
                    # included any fields, we have to make sure it's mentioned
                    # so that only the "must include" fields are pulled in.
                    seen[model] = values
            # Now ensure that every model in the inheritance chain is mentioned
            # in the parent list. Again, it must be mentioned to ensure that
            # only "must include" fields are pulled in.
            for model in orig_opts.get_parent_list():
                seen.setdefault(model, set())
            for model, values in seen.items():
                callback(target, model, values)

    def table_alias(self, table_name, create=False, filtered_relation=None):
        """
        Return a table alias for the given table_name and whether this is a
        new alias or not.

        If 'create' is true, a new alias is always created. Otherwise, the
        most recently created alias for the table (if one exists) is reused.
        """
        alias_list = self.table_map.get(table_name)
        if not create and alias_list:
            alias = alias_list[0]
            self.alias_refcount[alias] += 1
            return alias, False

        # Create a new alias for this table.
        if alias_list:
            alias = '%s%d' % (self.alias_prefix, len(self.alias_map) + 1)
            alias_list.append(alias)
        else:
            # The first occurrence of a table uses the table name directly.
            alias = filtered_relation.alias if filtered_relation is not None else table_name
            self.table_map[table_name] = [alias]
        self.alias_refcount[alias] = 1
        return alias, True

    def ref_alias(self, alias):
        """Increases the reference count for this alias."""
        self.alias_refcount[alias] += 1

    def unref_alias(self, alias, amount=1):
        """Decreases the reference count for this alias."""
        self.alias_refcount[alias] -= amount

    def promote_joins(self, aliases):
        """
        Promote recursively the join type of given aliases and their children
        to an outer join. A join is only promoted if it is nullable or the
        parent join is an outer join.

        The children promotion is done to avoid join chains that contain a
        LOUTER b INNER c. So, if we have currently a INNER b INNER c and a->b
        is promoted, then we must also promote b->c automatically, or
        otherwise the promotion of a->b doesn't actually change anything in
        the query results.
        """
        aliases = list(aliases)
        while aliases:
            alias = aliases.pop(0)
            if self.alias_map[alias].join_type is None:
                # This is the base table (first FROM entry) - this table
                # isn't really joined at all in the query, so we should not
                # alter its join type.
                continue
            # Only the first alias (skipped above) should have None join_type
            assert self.alias_map[alias].join_type is not None
            parent_alias = self.alias_map[alias].parent_alias
            parent_louter = parent_alias and self.alias_map[parent_alias].join_type == LOUTER
            already_louter = self.alias_map[alias].join_type == LOUTER
            if ((self.alias_map[alias].nullable or parent_louter) and
                    not already_louter):
                self.alias_map[alias] = self.alias_map[alias].promote()
                # Join type of 'alias' changed, so re-examine all aliases that
                # refer to this one.
                aliases.extend(
                    join for join in self.alias_map
                    if self.alias_map[join].parent_alias == alias and join not in aliases
                )

    def demote_joins(self, aliases):
        """
        Change join type from LOUTER to INNER for all joins in aliases.

        Similarly to promote_joins(), this method must ensure no join chains
        containing first an outer, then an inner join are generated. If we
        are demoting b->c join in chain a LOUTER b LOUTER c then we must
        demote a->b automatically, or otherwise the demotion of b->c doesn't
        actually change anything in the query results.
        """
        aliases = list(aliases)
        while aliases:
            alias = aliases.pop(0)
            if self.alias_map[alias].join_type == LOUTER:
                self.alias_map[alias] = self.alias_map[alias].demote()
                parent_alias = self.alias_map[alias].parent_alias
                if self.alias_map[parent_alias].join_type == INNER:
                    aliases.append(parent_alias)

    def reset_refcounts(self, to_counts):
        """
        Reset reference counts for aliases so that they match the value passed
        in `to_counts`.
        """
        for alias, cur_refcount in self.alias_refcount.copy().items():
            unref_amount = cur_refcount - to_counts.get(alias, 0)
            self.unref_alias(alias, unref_amount)

    def change_aliases(self, change_map):
        """
        Change the aliases in change_map (which maps old-alias -> new-alias),
        relabelling any references to them in select columns and the where
        clause.
        """
        assert set(change_map).isdisjoint(change_map.values())

        # 1. Update references in "select" (normal columns plus aliases),
        # "group by" and "where".
        self.where.relabel_aliases(change_map)
        if isinstance(self.group_by, tuple):
            self.group_by = tuple([col.relabeled_clone(change_map) for col in self.group_by])
        self.select = tuple([col.relabeled_clone(change_map) for col in self.select])
        self.annotations = self.annotations and {
            key: col.relabeled_clone(change_map) for key, col in self.annotations.items()
        }

        # 2. Rename the alias in the internal table/alias datastructures.
        for old_alias, new_alias in change_map.items():
            if old_alias not in self.alias_map:
                continue
            alias_data = self.alias_map[old_alias].relabeled_clone(change_map)
            self.alias_map[new_alias] = alias_data
            self.alias_refcount[new_alias] = self.alias_refcount[old_alias]
            del self.alias_refcount[old_alias]
            del self.alias_map[old_alias]

            table_aliases = self.table_map[alias_data.table_name]
            for pos, alias in enumerate(table_aliases):
                if alias == old_alias:
                    table_aliases[pos] = new_alias
                    break
        self.external_aliases = {
            # Table is aliased or it's being changed and thus is aliased.
            change_map.get(alias, alias): (aliased or alias in change_map)
            for alias, aliased in self.external_aliases.items()
        }

    def bump_prefix(self, outer_query):
        """
        Change the alias prefix to the next letter in the alphabet in a way
        that the outer query's aliases and this query's aliases will not
        conflict. Even tables that previously had no alias will get an alias
        after this call.
        """
        def prefix_gen():
            """
            Generate a sequence of characters in alphabetical order:
                -> 'A', 'B', 'C', ...

            When the alphabet is finished, the sequence will continue with the
            Cartesian product:
                -> 'AA', 'AB', 'AC', ...
            """
            alphabet = ascii_uppercase
            prefix = chr(ord(self.alias_prefix) + 1)
            yield prefix
            for n in count(1):
                seq = alphabet[alphabet.index(prefix):] if prefix else alphabet
                for s in product(seq, repeat=n):
                    yield ''.join(s)
                prefix = None

        if self.alias_prefix != outer_query.alias_prefix:
            # No clashes between self and outer query should be possible.
            return

        # Explicitly avoid infinite loop. The constant divider is based on how
        # much depth recursive subquery references add to the stack.
This value # might need to be adjusted when adding or removing function calls from # the code path in charge of performing these operations. local_recursion_limit = sys.getrecursionlimit() // 16 for pos, prefix in enumerate(prefix_gen()): if prefix not in self.subq_aliases: self.alias_prefix = prefix break if pos > local_recursion_limit: raise RecursionError( 'Maximum recursion depth exceeded: too many subqueries.' ) self.subq_aliases = self.subq_aliases.union([self.alias_prefix]) outer_query.subq_aliases = outer_query.subq_aliases.union(self.subq_aliases) self.change_aliases({ alias: '%s%d' % (self.alias_prefix, pos) for pos, alias in enumerate(self.alias_map) }) def get_initial_alias(self): """ Return the first alias for this query, after increasing its reference count. """ if self.alias_map: alias = self.base_table self.ref_alias(alias) else: alias = self.join(BaseTable(self.get_meta().db_table, None)) return alias def count_active_tables(self): """ Return the number of tables in this query with a non-zero reference count. After execution, the reference counts are zeroed, so tables added in compiler will not be seen by this method. """ return len([1 for count in self.alias_refcount.values() if count]) def join(self, join, reuse=None, reuse_with_filtered_relation=False): """ Return an alias for the 'join', either reusing an existing alias for that join or creating a new one. 'join' is either a sql.datastructures.BaseTable or Join. The 'reuse' parameter can be either None which means all joins are reusable, or it can be a set containing the aliases that can be reused. The 'reuse_with_filtered_relation' parameter is used when computing FilteredRelation instances. A join is always created as LOUTER if the lhs alias is LOUTER to make sure chains like t1 LOUTER t2 INNER t3 aren't generated. All new joins are created as LOUTER if the join is nullable. """ if reuse_with_filtered_relation and reuse: reuse_aliases = [ a for a, j in self.alias_map.items() if a in reuse and j.equals(join, with_filtered_relation=False) ] else: reuse_aliases = [ a for a, j in self.alias_map.items() if (reuse is None or a in reuse) and j == join ] if reuse_aliases: if join.table_alias in reuse_aliases: reuse_alias = join.table_alias else: # Reuse the most recent alias of the joined table # (a many-to-many relation may be joined multiple times). reuse_alias = reuse_aliases[-1] self.ref_alias(reuse_alias) return reuse_alias # No reuse is possible, so we need a new alias. alias, _ = self.table_alias(join.table_name, create=True, filtered_relation=join.filtered_relation) if join.join_type: if self.alias_map[join.parent_alias].join_type == LOUTER or join.nullable: join_type = LOUTER else: join_type = INNER join.join_type = join_type join.table_alias = alias self.alias_map[alias] = join return alias def join_parent_model(self, opts, model, alias, seen): """ Make sure the given 'model' is joined in the query. If 'model' isn't a parent of 'opts' or if it is None this method is a no-op. The 'alias' is the root alias for starting the join, 'seen' is a dict of model -> alias of existing joins. It must also contain a mapping of None -> some alias. This will be returned in the no-op case. 
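        Illustratively (hypothetical models): if Restaurant inherits from
        Place, filtering Restaurant on a field stored on Place needs the
        Restaurant -> Place parent link joined; this method adds that join
        (or reuses it via 'seen') and returns the Place alias.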
""" if model in seen: return seen[model] chain = opts.get_base_chain(model) if not chain: return alias curr_opts = opts for int_model in chain: if int_model in seen: curr_opts = int_model._meta alias = seen[int_model] continue # Proxy model have elements in base chain # with no parents, assign the new options # object and skip to the next base in that # case if not curr_opts.parents[int_model]: curr_opts = int_model._meta continue link_field = curr_opts.get_ancestor_link(int_model) join_info = self.setup_joins([link_field.name], curr_opts, alias) curr_opts = int_model._meta alias = seen[int_model] = join_info.joins[-1] return alias or seen[None] def add_annotation(self, annotation, alias, is_summary=False): """Add a single annotation expression to the Query.""" annotation = annotation.resolve_expression(self, allow_joins=True, reuse=None, summarize=is_summary) self.append_annotation_mask([alias]) self.annotations[alias] = annotation def resolve_expression(self, query, *args, **kwargs): clone = self.clone() # Subqueries need to use a different set of aliases than the outer query. clone.bump_prefix(query) clone.subquery = True # It's safe to drop ordering if the queryset isn't using slicing, # distinct(*fields) or select_for_update(). if (self.low_mark == 0 and self.high_mark is None and not self.distinct_fields and not self.select_for_update): clone.clear_ordering(True) clone.where.resolve_expression(query, *args, **kwargs) for key, value in clone.annotations.items(): resolved = value.resolve_expression(query, *args, **kwargs) if hasattr(resolved, 'external_aliases'): resolved.external_aliases.update(clone.external_aliases) clone.annotations[key] = resolved # Outer query's aliases are considered external. for alias, table in query.alias_map.items(): clone.external_aliases[alias] = ( (isinstance(table, Join) and table.join_field.related_model._meta.db_table != alias) or (isinstance(table, BaseTable) and table.table_name != table.table_alias) ) return clone def get_external_cols(self): exprs = chain(self.annotations.values(), self.where.children) return [ col for col in self._gen_cols(exprs) if col.alias in self.external_aliases ] def as_sql(self, compiler, connection): sql, params = self.get_compiler(connection=connection).as_sql() if self.subquery: sql = '(%s)' % sql return sql, params def resolve_lookup_value(self, value, can_reuse, allow_joins): if hasattr(value, 'resolve_expression'): value = value.resolve_expression( self, reuse=can_reuse, allow_joins=allow_joins, ) elif isinstance(value, (list, tuple)): # The items of the iterable may be expressions and therefore need # to be resolved independently. return type(value)( self.resolve_lookup_value(sub_value, can_reuse, allow_joins) for sub_value in value ) return value def solve_lookup_type(self, lookup): """ Solve the lookup type from the lookup (e.g.: 'foobar__id__icontains'). """ lookup_splitted = lookup.split(LOOKUP_SEP) if self.annotations: expression, expression_lookups = refs_expression(lookup_splitted, self.annotations) if expression: return expression_lookups, (), expression _, field, _, lookup_parts = self.names_to_path(lookup_splitted, self.get_meta()) field_parts = lookup_splitted[0:len(lookup_splitted) - len(lookup_parts)] if len(lookup_parts) > 1 and not field_parts: raise FieldError( 'Invalid lookup "%s" for model %s".' 
% (lookup, self.get_meta().model.__name__) ) return lookup_parts, field_parts, False def check_query_object_type(self, value, opts, field): """ Check whether the object passed while querying is of the correct type. If not, raise a ValueError specifying the wrong object. """ if hasattr(value, '_meta'): if not check_rel_lookup_compatibility(value._meta.model, opts, field): raise ValueError( 'Cannot query "%s": Must be "%s" instance.' % (value, opts.object_name)) def check_related_objects(self, field, value, opts): """Check the type of object passed to query relations.""" if field.is_relation: # Check that the field and the queryset use the same model in a # query like .filter(author=Author.objects.all()). For example, the # opts would be Author's (from the author field) and value.model # would be Author.objects.all() queryset's .model (Author also). # The field is the related field on the lhs side. if (isinstance(value, Query) and not value.has_select_fields and not check_rel_lookup_compatibility(value.model, opts, field)): raise ValueError( 'Cannot use QuerySet for "%s": Use a QuerySet for "%s".' % (value.model._meta.object_name, opts.object_name) ) elif hasattr(value, '_meta'): self.check_query_object_type(value, opts, field) elif hasattr(value, '__iter__'): for v in value: self.check_query_object_type(v, opts, field) def check_filterable(self, expression): """Raise an error if expression cannot be used in a WHERE clause.""" if not getattr(expression, 'filterable', True): raise NotSupportedError( expression.__class__.__name__ + ' is disallowed in the filter ' 'clause.' ) if hasattr(expression, 'get_source_expressions'): for expr in expression.get_source_expressions(): self.check_filterable(expr) def build_lookup(self, lookups, lhs, rhs): """ Try to extract transforms and lookup from given lhs. The lhs value is something that works like SQLExpression. The rhs value is what the lookup is going to compare against. The lookups is a list of names to extract using get_lookup() and get_transform(). """ # __exact is the default lookup if one isn't given. *transforms, lookup_name = lookups or ['exact'] for name in transforms: lhs = self.try_transform(lhs, name) # First try get_lookup() so that the lookup takes precedence if the lhs # supports both transform and lookup for the name. lookup_class = lhs.get_lookup(lookup_name) if not lookup_class: if lhs.field.is_relation: raise FieldError('Related Field got invalid lookup: {}'.format(lookup_name)) # A lookup wasn't found. Try to interpret the name as a transform # and do an Exact lookup against it. lhs = self.try_transform(lhs, lookup_name) lookup_name = 'exact' lookup_class = lhs.get_lookup(lookup_name) if not lookup_class: return lookup = lookup_class(lhs, rhs) # Interpret '__exact=None' as the sql 'is NULL'; otherwise, reject all # uses of None as a query value unless the lookup supports it. if lookup.rhs is None and not lookup.can_use_none_as_rhs: if lookup_name not in ('exact', 'iexact'): raise ValueError("Cannot use None as a query value") return lhs.get_lookup('isnull')(lhs, True) # For Oracle '' is equivalent to null. The check must be done at this # stage because join promotion can't be done in the compiler. Using # DEFAULT_DB_ALIAS isn't nice but it's the best that can be done here. # A similar thing is done in is_nullable(), too. 
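        # Illustrative example: on Oracle, .filter(name='') must behave like
        # .filter(name__isnull=True), because Oracle stores '' as NULL, so
        # the exact-match lookup below is swapped for an isnull lookup.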
if (connections[DEFAULT_DB_ALIAS].features.interprets_empty_strings_as_nulls and lookup_name == 'exact' and lookup.rhs == ''): return lhs.get_lookup('isnull')(lhs, True) return lookup def try_transform(self, lhs, name): """ Helper method for build_lookup(). Try to fetch and initialize a transform for name parameter from lhs. """ transform_class = lhs.get_transform(name) if transform_class: return transform_class(lhs) else: output_field = lhs.output_field.__class__ suggested_lookups = difflib.get_close_matches(name, output_field.get_lookups()) if suggested_lookups: suggestion = ', perhaps you meant %s?' % ' or '.join(suggested_lookups) else: suggestion = '.' raise FieldError( "Unsupported lookup '%s' for %s or join on the field not " "permitted%s" % (name, output_field.__name__, suggestion) ) def build_filter(self, filter_expr, branch_negated=False, current_negated=False, can_reuse=None, allow_joins=True, split_subq=True, reuse_with_filtered_relation=False, check_filterable=True): """ Build a WhereNode for a single filter clause but don't add it to this Query. Query.add_q() will then add this filter to the where Node. The 'branch_negated' tells us if the current branch contains any negations. This will be used to determine if subqueries are needed. The 'current_negated' is used to determine if the current filter is negated or not and this will be used to determine if IS NULL filtering is needed. The difference between current_negated and branch_negated is that branch_negated is set on first negation, but current_negated is flipped for each negation. Note that add_filter will not do any negating itself, that is done upper in the code by add_q(). The 'can_reuse' is a set of reusable joins for multijoins. If 'reuse_with_filtered_relation' is True, then only joins in can_reuse will be reused. The method will create a filter clause that can be added to the current query. However, if the filter isn't added to the query then the caller is responsible for unreffing the joins used. 
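        Illustratively, a keyword filter arrives here as a 2-tuple, e.g.
        ('author__name__icontains', 'bob'): the lookup part ('icontains') is
        split from the relation path ('author', 'name'), joins are set up for
        the path, and a Lookup instance is built against the final column.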
""" if isinstance(filter_expr, dict): raise FieldError("Cannot parse keyword query as dict") if isinstance(filter_expr, Q): return self._add_q( filter_expr, branch_negated=branch_negated, current_negated=current_negated, used_aliases=can_reuse, allow_joins=allow_joins, split_subq=split_subq, check_filterable=check_filterable, ) if hasattr(filter_expr, 'resolve_expression'): if not getattr(filter_expr, 'conditional', False): raise TypeError('Cannot filter against a non-conditional expression.') condition = self.build_lookup( ['exact'], filter_expr.resolve_expression(self, allow_joins=allow_joins), True ) clause = self.where_class() clause.add(condition, AND) return clause, [] arg, value = filter_expr if not arg: raise FieldError("Cannot parse keyword query %r" % arg) lookups, parts, reffed_expression = self.solve_lookup_type(arg) if check_filterable: self.check_filterable(reffed_expression) if not allow_joins and len(parts) > 1: raise FieldError("Joined field references are not permitted in this query") pre_joins = self.alias_refcount.copy() value = self.resolve_lookup_value(value, can_reuse, allow_joins) used_joins = {k for k, v in self.alias_refcount.items() if v > pre_joins.get(k, 0)} if check_filterable: self.check_filterable(value) clause = self.where_class() if reffed_expression: condition = self.build_lookup(lookups, reffed_expression, value) clause.add(condition, AND) return clause, [] opts = self.get_meta() alias = self.get_initial_alias() allow_many = not branch_negated or not split_subq try: join_info = self.setup_joins( parts, opts, alias, can_reuse=can_reuse, allow_many=allow_many, reuse_with_filtered_relation=reuse_with_filtered_relation, ) # Prevent iterator from being consumed by check_related_objects() if isinstance(value, Iterator): value = list(value) self.check_related_objects(join_info.final_field, value, join_info.opts) # split_exclude() needs to know which joins were generated for the # lookup parts self._lookup_joins = join_info.joins except MultiJoin as e: return self.split_exclude(filter_expr, can_reuse, e.names_with_path) # Update used_joins before trimming since they are reused to determine # which joins could be later promoted to INNER. used_joins.update(join_info.joins) targets, alias, join_list = self.trim_joins(join_info.targets, join_info.joins, join_info.path) if can_reuse is not None: can_reuse.update(join_list) if join_info.final_field.is_relation: # No support for transforms for relational fields num_lookups = len(lookups) if num_lookups > 1: raise FieldError('Related Field got invalid lookup: {}'.format(lookups[0])) if len(targets) == 1: col = self._get_col(targets[0], join_info.final_field, alias) else: col = MultiColSource(alias, targets, join_info.targets, join_info.final_field) else: col = self._get_col(targets[0], join_info.final_field, alias) condition = self.build_lookup(lookups, col, value) lookup_type = condition.lookup_name clause.add(condition, AND) require_outer = lookup_type == 'isnull' and condition.rhs is True and not current_negated if current_negated and (lookup_type != 'isnull' or condition.rhs is False) and condition.rhs is not None: require_outer = True if (lookup_type != 'isnull' and ( self.is_nullable(targets[0]) or self.alias_map[join_list[-1]].join_type == LOUTER)): # The condition added here will be SQL like this: # NOT (col IS NOT NULL), where the first NOT is added in # upper layers of code. The reason for addition is that if col # is null, then col != someval will result in SQL "unknown" # which isn't the same as in Python. 
The Python None handling # is wanted, and it can be gotten by # (col IS NULL OR col != someval) # <=> # NOT (col IS NOT NULL AND col = someval). lookup_class = targets[0].get_lookup('isnull') col = self._get_col(targets[0], join_info.targets[0], alias) clause.add(lookup_class(col, False), AND) return clause, used_joins if not require_outer else () def add_filter(self, filter_clause): self.add_q(Q(**{filter_clause[0]: filter_clause[1]})) def add_q(self, q_object): """ A preprocessor for the internal _add_q(). Responsible for doing final join promotion. """ # For join promotion this case is doing an AND for the added q_object # and existing conditions. So, any existing inner join forces the join # type to remain inner. Existing outer joins can however be demoted. # (Consider case where rel_a is LOUTER and rel_a__col=1 is added - if # rel_a doesn't produce any rows, then the whole condition must fail. # So, demotion is OK. existing_inner = {a for a in self.alias_map if self.alias_map[a].join_type == INNER} clause, _ = self._add_q(q_object, self.used_aliases) if clause: self.where.add(clause, AND) self.demote_joins(existing_inner) def build_where(self, filter_expr): return self.build_filter(filter_expr, allow_joins=False)[0] def _add_q(self, q_object, used_aliases, branch_negated=False, current_negated=False, allow_joins=True, split_subq=True, check_filterable=True): """Add a Q-object to the current filter.""" connector = q_object.connector current_negated = current_negated ^ q_object.negated branch_negated = branch_negated or q_object.negated target_clause = self.where_class(connector=connector, negated=q_object.negated) joinpromoter = JoinPromoter(q_object.connector, len(q_object.children), current_negated) for child in q_object.children: child_clause, needed_inner = self.build_filter( child, can_reuse=used_aliases, branch_negated=branch_negated, current_negated=current_negated, allow_joins=allow_joins, split_subq=split_subq, check_filterable=check_filterable, ) joinpromoter.add_votes(needed_inner) if child_clause: target_clause.add(child_clause, connector) needed_inner = joinpromoter.update_join_types(self) return target_clause, needed_inner def build_filtered_relation_q(self, q_object, reuse, branch_negated=False, current_negated=False): """Add a FilteredRelation object to the current filter.""" connector = q_object.connector current_negated ^= q_object.negated branch_negated = branch_negated or q_object.negated target_clause = self.where_class(connector=connector, negated=q_object.negated) for child in q_object.children: if isinstance(child, Node): child_clause = self.build_filtered_relation_q( child, reuse=reuse, branch_negated=branch_negated, current_negated=current_negated, ) else: child_clause, _ = self.build_filter( child, can_reuse=reuse, branch_negated=branch_negated, current_negated=current_negated, allow_joins=True, split_subq=False, reuse_with_filtered_relation=True, ) target_clause.add(child_clause, connector) return target_clause def add_filtered_relation(self, filtered_relation, alias): filtered_relation.alias = alias lookups = dict(get_children_from_q(filtered_relation.condition)) for lookup in chain((filtered_relation.relation_name,), lookups): lookup_parts, field_parts, _ = self.solve_lookup_type(lookup) shift = 2 if not lookup_parts else 1 if len(field_parts) > (shift + len(lookup_parts)): raise ValueError( "FilteredRelation's condition doesn't support nested " "relations (got %r)." 
% lookup ) self._filtered_relations[filtered_relation.alias] = filtered_relation def names_to_path(self, names, opts, allow_many=True, fail_on_missing=False): """ Walk the list of names and turns them into PathInfo tuples. A single name in 'names' can generate multiple PathInfos (m2m, for example). 'names' is the path of names to travel, 'opts' is the model Options we start the name resolving from, 'allow_many' is as for setup_joins(). If fail_on_missing is set to True, then a name that can't be resolved will generate a FieldError. Return a list of PathInfo tuples. In addition return the final field (the last used join field) and target (which is a field guaranteed to contain the same value as the final field). Finally, return those names that weren't found (which are likely transforms and the final lookup). """ path, names_with_path = [], [] for pos, name in enumerate(names): cur_names_with_path = (name, []) if name == 'pk': name = opts.pk.name field = None filtered_relation = None try: field = opts.get_field(name) except FieldDoesNotExist: if name in self.annotation_select: field = self.annotation_select[name].output_field elif name in self._filtered_relations and pos == 0: filtered_relation = self._filtered_relations[name] field = opts.get_field(filtered_relation.relation_name) if field is not None: # Fields that contain one-to-many relations with a generic # model (like a GenericForeignKey) cannot generate reverse # relations and therefore cannot be used for reverse querying. if field.is_relation and not field.related_model: raise FieldError( "Field %r does not generate an automatic reverse " "relation and therefore cannot be used for reverse " "querying. If it is a GenericForeignKey, consider " "adding a GenericRelation." % name ) try: model = field.model._meta.concrete_model except AttributeError: # QuerySet.annotate() may introduce fields that aren't # attached to a model. model = None else: # We didn't find the current field, so move position back # one step. pos -= 1 if pos == -1 or fail_on_missing: available = sorted([ *get_field_names_from_opts(opts), *self.annotation_select, *self._filtered_relations, ]) raise FieldError("Cannot resolve keyword '%s' into field. " "Choices are: %s" % (name, ", ".join(available))) break # Check if we need any joins for concrete inheritance cases (the # field lives in parent, but we are currently in one of its # children) if model is not opts.model: path_to_parent = opts.get_path_to_parent(model) if path_to_parent: path.extend(path_to_parent) cur_names_with_path[1].extend(path_to_parent) opts = path_to_parent[-1].to_opts if hasattr(field, 'get_path_info'): pathinfos = field.get_path_info(filtered_relation) if not allow_many: for inner_pos, p in enumerate(pathinfos): if p.m2m: cur_names_with_path[1].extend(pathinfos[0:inner_pos + 1]) names_with_path.append(cur_names_with_path) raise MultiJoin(pos + 1, names_with_path) last = pathinfos[-1] path.extend(pathinfos) final_field = last.join_field opts = last.to_opts targets = last.target_fields cur_names_with_path[1].extend(pathinfos) names_with_path.append(cur_names_with_path) else: # Local non-relational field. final_field = field targets = (field,) if fail_on_missing and pos + 1 != len(names): raise FieldError( "Cannot resolve keyword %r into field. Join on '%s'" " not permitted." 
% (names[pos + 1], name)) break return path, final_field, targets, names[pos + 1:] def setup_joins(self, names, opts, alias, can_reuse=None, allow_many=True, reuse_with_filtered_relation=False): """ Compute the necessary table joins for the passage through the fields given in 'names'. 'opts' is the Options class for the current model (which gives the table we are starting from), 'alias' is the alias for the table to start the joining from. The 'can_reuse' defines the reverse foreign key joins we can reuse. It can be None in which case all joins are reusable or a set of aliases that can be reused. Note that non-reverse foreign keys are always reusable when using setup_joins(). The 'reuse_with_filtered_relation' can be used to force 'can_reuse' parameter and force the relation on the given connections. If 'allow_many' is False, then any reverse foreign key seen will generate a MultiJoin exception. Return the final field involved in the joins, the target field (used for any 'where' constraint), the final 'opts' value, the joins, the field path traveled to generate the joins, and a transform function that takes a field and alias and is equivalent to `field.get_col(alias)` in the simple case but wraps field transforms if they were included in names. The target field is the field containing the concrete value. Final field can be something different, for example foreign key pointing to that value. Final field is needed for example in some value conversions (convert 'obj' in fk__id=obj to pk val using the foreign key field for example). """ joins = [alias] # The transform can't be applied yet, as joins must be trimmed later. # To avoid making every caller of this method look up transforms # directly, compute transforms here and create a partial that converts # fields to the appropriate wrapped version. def final_transformer(field, alias): return field.get_col(alias) # Try resolving all the names as fields first. If there's an error, # treat trailing names as lookups until a field can be resolved. last_field_exception = None for pivot in range(len(names), 0, -1): try: path, final_field, targets, rest = self.names_to_path( names[:pivot], opts, allow_many, fail_on_missing=True, ) except FieldError as exc: if pivot == 1: # The first item cannot be a lookup, so it's safe # to raise the field error here. raise else: last_field_exception = exc else: # The transforms are the remaining items that couldn't be # resolved into fields. transforms = names[pivot:] break for name in transforms: def transform(field, alias, *, name, previous): try: wrapped = previous(field, alias) return self.try_transform(wrapped, name) except FieldError: # FieldError is raised if the transform doesn't exist. if isinstance(final_field, Field) and last_field_exception: raise last_field_exception else: raise final_transformer = functools.partial(transform, name=name, previous=final_transformer) # Then, add the path to the query's joins. Note that we can't trim # joins at this stage - we will need the information about join type # of the trimmed joins. 
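        # Illustrative example (hypothetical fields): for
        # names == ['author', 'name', 'lower'], names_to_path() fails on the
        # full list, succeeds for ['author', 'name'], and 'lower' is kept as
        # a transform wrapped around the final column by the
        # final_transformer built above.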
for join in path: if join.filtered_relation: filtered_relation = join.filtered_relation.clone() table_alias = filtered_relation.alias else: filtered_relation = None table_alias = None opts = join.to_opts if join.direct: nullable = self.is_nullable(join.join_field) else: nullable = True connection = Join( opts.db_table, alias, table_alias, INNER, join.join_field, nullable, filtered_relation=filtered_relation, ) reuse = can_reuse if join.m2m or reuse_with_filtered_relation else None alias = self.join( connection, reuse=reuse, reuse_with_filtered_relation=reuse_with_filtered_relation, ) joins.append(alias) if filtered_relation: filtered_relation.path = joins[:] return JoinInfo(final_field, targets, opts, joins, path, final_transformer) def trim_joins(self, targets, joins, path): """ The 'target' parameter is the final field being joined to, 'joins' is the full list of join aliases. The 'path' contain the PathInfos used to create the joins. Return the final target field and table alias and the new active joins. Always trim any direct join if the target column is already in the previous table. Can't trim reverse joins as it's unknown if there's anything on the other side of the join. """ joins = joins[:] for pos, info in enumerate(reversed(path)): if len(joins) == 1 or not info.direct: break if info.filtered_relation: break join_targets = {t.column for t in info.join_field.foreign_related_fields} cur_targets = {t.column for t in targets} if not cur_targets.issubset(join_targets): break targets_dict = {r[1].column: r[0] for r in info.join_field.related_fields if r[1].column in cur_targets} targets = tuple(targets_dict[t.column] for t in targets) self.unref_alias(joins.pop()) return targets, joins[-1], joins @classmethod def _gen_cols(cls, exprs): for expr in exprs: if isinstance(expr, Col): yield expr else: yield from cls._gen_cols(expr.get_source_expressions()) @classmethod def _gen_col_aliases(cls, exprs): yield from (expr.alias for expr in cls._gen_cols(exprs)) def resolve_ref(self, name, allow_joins=True, reuse=None, summarize=False): if not allow_joins and LOOKUP_SEP in name: raise FieldError("Joined field references are not permitted in this query") annotation = self.annotations.get(name) if annotation is not None: if not allow_joins: for alias in self._gen_col_aliases([annotation]): if isinstance(self.alias_map[alias], Join): raise FieldError( 'Joined field references are not permitted in ' 'this query' ) if summarize: # Summarize currently means we are doing an aggregate() query # which is executed as a wrapped subquery if any of the # aggregate() elements reference an existing annotation. In # that case we need to return a Ref to the subquery's annotation. return Ref(name, self.annotation_select[name]) else: return annotation else: field_list = name.split(LOOKUP_SEP) join_info = self.setup_joins(field_list, self.get_meta(), self.get_initial_alias(), can_reuse=reuse) targets, final_alias, join_list = self.trim_joins(join_info.targets, join_info.joins, join_info.path) if not allow_joins and len(join_list) > 1: raise FieldError('Joined field references are not permitted in this query') if len(targets) > 1: raise FieldError("Referencing multicolumn fields with F() objects " "isn't supported") # Verify that the last lookup in name is a field or a transform: # transform_function() raises FieldError if not. 
join_info.transform_function(targets[0], final_alias) if reuse is not None: reuse.update(join_list) return self._get_col(targets[0], join_info.targets[0], join_list[-1]) def split_exclude(self, filter_expr, can_reuse, names_with_path): """ When doing an exclude against any kind of N-to-many relation, we need to use a subquery. This method constructs the nested query, given the original exclude filter (filter_expr) and the portion up to the first N-to-many relation field. For example, if the origin filter is ~Q(child__name='foo'), filter_expr is ('child__name', 'foo') and can_reuse is a set of joins usable for filters in the original query. We will turn this into equivalent of: WHERE NOT (pk IN (SELECT parent_id FROM thetable WHERE name = 'foo' AND parent_id IS NOT NULL)) It might be worth it to consider using WHERE NOT EXISTS as that has saner null handling, and is easier for the backend's optimizer to handle. """ filter_lhs, filter_rhs = filter_expr if isinstance(filter_rhs, OuterRef): filter_expr = (filter_lhs, OuterRef(filter_rhs)) elif isinstance(filter_rhs, F): filter_expr = (filter_lhs, OuterRef(filter_rhs.name)) # Generate the inner query. query = Query(self.model) query._filtered_relations = self._filtered_relations query.add_filter(filter_expr) query.clear_ordering(True) # Try to have as simple as possible subquery -> trim leading joins from # the subquery. trimmed_prefix, contains_louter = query.trim_start(names_with_path) # Add extra check to make sure the selected field will not be null # since we are adding an IN <subquery> clause. This prevents the # database from tripping over IN (...,NULL,...) selects and returning # nothing col = query.select[0] select_field = col.target alias = col.alias if self.is_nullable(select_field): lookup_class = select_field.get_lookup('isnull') lookup = lookup_class(select_field.get_col(alias), False) query.where.add(lookup, AND) if alias in can_reuse: pk = select_field.model._meta.pk # Need to add a restriction so that outer query's filters are in effect for # the subquery, too. query.bump_prefix(self) lookup_class = select_field.get_lookup('exact') # Note that the query.select[0].alias is different from alias # due to bump_prefix above. lookup = lookup_class(pk.get_col(query.select[0].alias), pk.get_col(alias)) query.where.add(lookup, AND) query.external_aliases[alias] = True condition, needed_inner = self.build_filter( ('%s__in' % trimmed_prefix, query), current_negated=True, branch_negated=True, can_reuse=can_reuse) if contains_louter: or_null_condition, _ = self.build_filter( ('%s__isnull' % trimmed_prefix, True), current_negated=True, branch_negated=True, can_reuse=can_reuse) condition.add(or_null_condition, OR) # Note that the end result will be: # (outercol NOT IN innerq AND outercol IS NOT NULL) OR outercol IS NULL. # This might look crazy but due to how IN works, this seems to be # correct. If the IS NOT NULL check is removed then outercol NOT # IN will return UNKNOWN. If the IS NULL check is removed, then if # outercol IS NULL we will not match the row. return condition, needed_inner def set_empty(self): self.where.add(NothingNode(), AND) def is_empty(self): return any(isinstance(c, NothingNode) for c in self.where.children) def set_limits(self, low=None, high=None): """ Adjust the limits on the rows retrieved. Use low/high to set these, as it makes it more Pythonic to read and write. When the SQL query is created, convert them to the appropriate offset and limit values. Apply any limits passed in here to the existing constraints. 
Add low to the current low value and clamp both to any existing high value. """ if high is not None: if self.high_mark is not None: self.high_mark = min(self.high_mark, self.low_mark + high) else: self.high_mark = self.low_mark + high if low is not None: if self.high_mark is not None: self.low_mark = min(self.high_mark, self.low_mark + low) else: self.low_mark = self.low_mark + low if self.low_mark == self.high_mark: self.set_empty() def clear_limits(self): """Clear any existing limits.""" self.low_mark, self.high_mark = 0, None @property def is_sliced(self): return self.low_mark != 0 or self.high_mark is not None def has_limit_one(self): return self.high_mark is not None and (self.high_mark - self.low_mark) == 1 def can_filter(self): """ Return True if adding filters to this instance is still possible. Typically, this means no limits or offsets have been put on the results. """ return not self.is_sliced def clear_select_clause(self): """Remove all fields from SELECT clause.""" self.select = () self.default_cols = False self.select_related = False self.set_extra_mask(()) self.set_annotation_mask(()) def clear_select_fields(self): """ Clear the list of fields to select (but not extra_select columns). Some queryset types completely replace any existing list of select columns. """ self.select = () self.values_select = () def add_select_col(self, col): self.select += col, self.values_select += col.output_field.name, def set_select(self, cols): self.default_cols = False self.select = tuple(cols) def add_distinct_fields(self, *field_names): """ Add and resolve the given fields to the query's "distinct on" clause. """ self.distinct_fields = field_names self.distinct = True def add_fields(self, field_names, allow_m2m=True): """ Add the given (model) fields to the select set. Add the field names in the order specified. """ alias = self.get_initial_alias() opts = self.get_meta() try: cols = [] for name in field_names: # Join promotion note - we must not remove any rows here, so # if there is no existing joins, use outer join. join_info = self.setup_joins(name.split(LOOKUP_SEP), opts, alias, allow_many=allow_m2m) targets, final_alias, joins = self.trim_joins( join_info.targets, join_info.joins, join_info.path, ) for target in targets: cols.append(join_info.transform_function(target, final_alias)) if cols: self.set_select(cols) except MultiJoin: raise FieldError("Invalid field name: '%s'" % name) except FieldError: if LOOKUP_SEP in name: # For lookups spanning over relationships, show the error # from the model on which the lookup failed. raise else: names = sorted([ *get_field_names_from_opts(opts), *self.extra, *self.annotation_select, *self._filtered_relations ]) raise FieldError("Cannot resolve keyword %r into field. " "Choices are: %s" % (name, ", ".join(names))) def add_ordering(self, *ordering): """ Add items from the 'ordering' sequence to the query's "order by" clause. These items are either field names (not column names) -- possibly with a direction prefix ('-' or '?') -- or OrderBy expressions. If 'ordering' is empty, clear all ordering from the query. 
""" errors = [] for item in ordering: if not hasattr(item, 'resolve_expression') and not ORDER_PATTERN.match(item): errors.append(item) if getattr(item, 'contains_aggregate', False): raise FieldError( 'Using an aggregate in order_by() without also including ' 'it in annotate() is not allowed: %s' % item ) if errors: raise FieldError('Invalid order_by arguments: %s' % errors) if ordering: self.order_by += ordering else: self.default_ordering = False def clear_ordering(self, force_empty): """ Remove any ordering settings. If 'force_empty' is True, there will be no ordering in the resulting query (not even the model's default). """ self.order_by = () self.extra_order_by = () if force_empty: self.default_ordering = False def set_group_by(self, allow_aliases=True): """ Expand the GROUP BY clause required by the query. This will usually be the set of all non-aggregate fields in the return data. If the database backend supports grouping by the primary key, and the query would be equivalent, the optimization will be made automatically. """ group_by = list(self.select) if self.annotation_select: for alias, annotation in self.annotation_select.items(): signature = inspect.signature(annotation.get_group_by_cols) if 'alias' not in signature.parameters: annotation_class = annotation.__class__ msg = ( '`alias=None` must be added to the signature of ' '%s.%s.get_group_by_cols().' ) % (annotation_class.__module__, annotation_class.__qualname__) warnings.warn(msg, category=RemovedInDjango40Warning) group_by_cols = annotation.get_group_by_cols() else: if not allow_aliases: alias = None group_by_cols = annotation.get_group_by_cols(alias=alias) group_by.extend(group_by_cols) self.group_by = tuple(group_by) def add_select_related(self, fields): """ Set up the select_related data structure so that we only select certain related models (as opposed to all models, when self.select_related=True). """ if isinstance(self.select_related, bool): field_dict = {} else: field_dict = self.select_related for field in fields: d = field_dict for part in field.split(LOOKUP_SEP): d = d.setdefault(part, {}) self.select_related = field_dict def add_extra(self, select, select_params, where, params, tables, order_by): """ Add data to the various extra_* attributes for user-created additions to the query. """ if select: # We need to pair any placeholder markers in the 'select' # dictionary with their parameters in 'select_params' so that # subsequent updates to the select dictionary also adjust the # parameters appropriately. select_pairs = {} if select_params: param_iter = iter(select_params) else: param_iter = iter([]) for name, entry in select.items(): entry = str(entry) entry_params = [] pos = entry.find("%s") while pos != -1: if pos == 0 or entry[pos - 1] != '%': entry_params.append(next(param_iter)) pos = entry.find("%s", pos + 2) select_pairs[name] = (entry, entry_params) self.extra.update(select_pairs) if where or params: self.where.add(ExtraWhere(where, params), AND) if tables: self.extra_tables += tuple(tables) if order_by: self.extra_order_by = order_by def clear_deferred_loading(self): """Remove any fields from the deferred loading set.""" self.deferred_loading = (frozenset(), True) def add_deferred_loading(self, field_names): """ Add the given list of model field names to the set of fields to exclude from loading from the database when automatic column selection is done. 
Add the new field names to any existing field names that are deferred (or removed from any existing field names that are marked as the only ones for immediate loading). """ # Fields on related models are stored in the literal double-underscore # format, so that we can use a set datastructure. We do the foo__bar # splitting and handling when computing the SQL column names (as part of # get_columns()). existing, defer = self.deferred_loading if defer: # Add to existing deferred names. self.deferred_loading = existing.union(field_names), True else: # Remove names from the set of any existing "immediate load" names. self.deferred_loading = existing.difference(field_names), False def add_immediate_loading(self, field_names): """ Add the given list of model field names to the set of fields to retrieve when the SQL is executed ("immediate loading" fields). The field names replace any existing immediate loading field names. If there are field names already specified for deferred loading, remove those names from the new field_names before storing the new names for immediate loading. (That is, immediate loading overrides any existing immediate values, but respects existing deferrals.) """ existing, defer = self.deferred_loading field_names = set(field_names) if 'pk' in field_names: field_names.remove('pk') field_names.add(self.get_meta().pk.name) if defer: # Remove any existing deferred names from the current set before # setting the new names. self.deferred_loading = field_names.difference(existing), False else: # Replace any existing "immediate load" field names. self.deferred_loading = frozenset(field_names), False def get_loaded_field_names(self): """ If any fields are marked to be deferred, return a dictionary mapping models to a set of names in those fields that will be loaded. If a model is not in the returned dictionary, none of its fields are deferred. If no fields are marked for deferral, return an empty dictionary. """ # We cache this because we call this function multiple times # (compiler.fill_related_selections, query.iterator) try: return self._loaded_field_names_cache except AttributeError: collection = {} self.deferred_to_data(collection, self.get_loaded_field_names_cb) self._loaded_field_names_cache = collection return collection def get_loaded_field_names_cb(self, target, model, fields): """Callback used by get_deferred_field_names().""" target[model] = {f.attname for f in fields} def set_annotation_mask(self, names): """Set the mask of annotations that will be returned by the SELECT.""" if names is None: self.annotation_select_mask = None else: self.annotation_select_mask = set(names) self._annotation_select_cache = None def append_annotation_mask(self, names): if self.annotation_select_mask is not None: self.set_annotation_mask(self.annotation_select_mask.union(names)) def set_extra_mask(self, names): """ Set the mask of extra select items that will be returned by SELECT. Don't remove them from the Query since they might be used later. """ if names is None: self.extra_select_mask = None else: self.extra_select_mask = set(names) self._extra_select_cache = None def set_values(self, fields): self.select_related = False self.clear_deferred_loading() self.clear_select_fields() if self.group_by is True: self.add_fields((f.attname for f in self.model._meta.concrete_fields), False) # Disable GROUP BY aliases to avoid orphaning references to the # SELECT clause which is about to be cleared. 
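            # Editorial example (not upstream code): for a hypothetical
            # qs.annotate(total=Count('x')).values('pk'), a GROUP BY entry
            # referencing the bare alias "total" would dangle once the SELECT
            # list is rebuilt below, so plain column expressions are grouped
            # on instead.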
            self.set_group_by(allow_aliases=False)
            self.clear_select_fields()

        if fields:
            field_names = []
            extra_names = []
            annotation_names = []
            if not self.extra and not self.annotations:
                # Shortcut - if there are no extra or annotations, then
                # the values() clause must be just field names.
                field_names = list(fields)
            else:
                self.default_cols = False
                for f in fields:
                    if f in self.extra_select:
                        extra_names.append(f)
                    elif f in self.annotation_select:
                        annotation_names.append(f)
                    else:
                        field_names.append(f)
                self.set_extra_mask(extra_names)
                self.set_annotation_mask(annotation_names)
        else:
            field_names = [f.attname for f in self.model._meta.concrete_fields]
        self.values_select = tuple(field_names)
        self.add_fields(field_names, True)

    @property
    def annotation_select(self):
        """
        Return the dictionary of aggregate columns that are not masked and
        should be used in the SELECT clause. Cache this result for
        performance.
        """
        if self._annotation_select_cache is not None:
            return self._annotation_select_cache
        elif not self.annotations:
            return {}
        elif self.annotation_select_mask is not None:
            self._annotation_select_cache = {
                k: v for k, v in self.annotations.items()
                if k in self.annotation_select_mask
            }
            return self._annotation_select_cache
        else:
            return self.annotations

    @property
    def extra_select(self):
        if self._extra_select_cache is not None:
            return self._extra_select_cache
        if not self.extra:
            return {}
        elif self.extra_select_mask is not None:
            self._extra_select_cache = {
                k: v for k, v in self.extra.items()
                if k in self.extra_select_mask
            }
            return self._extra_select_cache
        else:
            return self.extra

    def trim_start(self, names_with_path):
        """
        Trim joins from the start of the join path. The candidates for trim
        are the PathInfos in names_with_path structure that are m2m joins.

        Also set the select column so the start matches the join.

        This method is meant to be used for generating the subquery joins &
        cols in split_exclude().

        Return a lookup usable for doing outerq.filter(lookup=self) and a
        boolean indicating if the joins in the prefix contain a LEFT OUTER
        join.
        """
        all_paths = []
        for _, paths in names_with_path:
            all_paths.extend(paths)
        contains_louter = False
        # Trim and operate only on tables that were generated for
        # the lookup part of the query. That is, avoid trimming
        # joins generated for F() expressions.
        lookup_tables = [
            t for t in self.alias_map
            if t in self._lookup_joins or t == self.base_table
        ]
        for trimmed_paths, path in enumerate(all_paths):
            if path.m2m:
                break
            if self.alias_map[lookup_tables[trimmed_paths + 1]].join_type == LOUTER:
                contains_louter = True
            alias = lookup_tables[trimmed_paths]
            self.unref_alias(alias)
        # The path.join_field is a Rel, let's get the other side's field.
        join_field = path.join_field.field
        # Build the filter prefix.
        paths_in_prefix = trimmed_paths
        trimmed_prefix = []
        for name, path in names_with_path:
            if paths_in_prefix - len(path) < 0:
                break
            trimmed_prefix.append(name)
            paths_in_prefix -= len(path)
        trimmed_prefix.append(
            join_field.foreign_related_fields[0].name)
        trimmed_prefix = LOOKUP_SEP.join(trimmed_prefix)
        # Let's still see if we can trim the first join from the inner query
        # (that is, self). We can't do this for:
        # - LEFT JOINs because we would miss those rows that have nothing on
        #   the outer side,
        # - INNER JOINs from filtered relations because we would miss their
        #   filters.
        first_join = self.alias_map[lookup_tables[trimmed_paths + 1]]
        if first_join.join_type != LOUTER and not first_join.filtered_relation:
            select_fields = [r[0] for r in join_field.related_fields]
            select_alias = lookup_tables[trimmed_paths + 1]
            self.unref_alias(lookup_tables[trimmed_paths])
            extra_restriction = join_field.get_extra_restriction(
                self.where_class, None, lookup_tables[trimmed_paths + 1])
            if extra_restriction:
                self.where.add(extra_restriction, AND)
        else:
            # TODO: It might be possible to trim more joins from the start of
            # the inner query if it happens to have a longer join chain
            # containing the values in select_fields. Let's punt on this for
            # now.
            select_fields = [r[1] for r in join_field.related_fields]
            select_alias = lookup_tables[trimmed_paths]
        # The found starting point is likely a Join instead of a BaseTable reference.
        # But the first entry in the query's FROM clause must not be a JOIN.
        for table in self.alias_map:
            if self.alias_refcount[table] > 0:
                self.alias_map[table] = BaseTable(self.alias_map[table].table_name, table)
                break
        self.set_select([f.get_col(select_alias) for f in select_fields])
        return trimmed_prefix, contains_louter

    def is_nullable(self, field):
        """
        Check if the given field should be treated as nullable.

        Some backends treat '' as null and Django treats such fields as
        nullable for those backends. In such situations field.null can be
        False even if we should treat the field as nullable.
        """
        # We need to use DEFAULT_DB_ALIAS here, as QuerySet does not have
        # (nor should it have) knowledge of which connection is going to be
        # used. The proper fix would be to defer all decisions where
        # is_nullable() is needed to the compiler stage, but that is not easy
        # to do currently.
        return (
            connections[DEFAULT_DB_ALIAS].features.interprets_empty_strings_as_nulls and
            field.empty_strings_allowed
        ) or field.null


def get_order_dir(field, default='ASC'):
    """
    Return the field name and direction for an order specification. For
    example, '-foo' is returned as ('foo', 'DESC').

    The 'default' param is used to indicate which way no prefix (or a '+'
    prefix) should sort. The '-' prefix always sorts the opposite way.
    """
    dirn = ORDER_DIR[default]
    if field[0] == '-':
        return field[1:], dirn[1]
    return field, dirn[0]


def add_to_dict(data, key, value):
    """
    Add "value" to the set of values for "key", whether or not "key" already
    exists.
    """
    if key in data:
        data[key].add(value)
    else:
        data[key] = {value}


def is_reverse_o2o(field):
    """
    Check if the given field is reverse-o2o. The field is expected to be some
    sort of relation field or related object.
    """
    return field.is_relation and field.one_to_one and not field.concrete


class JoinPromoter:
    """
    A class to abstract away join promotion problems for complex filter
    conditions.
    """

    def __init__(self, connector, num_children, negated):
        self.connector = connector
        self.negated = negated
        if self.negated:
            if connector == AND:
                self.effective_connector = OR
            else:
                self.effective_connector = AND
        else:
            self.effective_connector = self.connector
        self.num_children = num_children
        # Maps of table alias to how many times it is seen as required for
        # inner and/or outer joins.
        self.votes = Counter()

    def add_votes(self, votes):
        """
        Add single vote per item to self.votes. Parameter can be any
        iterable.
        """
        self.votes.update(votes)

    def update_join_types(self, query):
        """
        Change join types so that the generated query is as efficient as
        possible, but still correct.
        So, change as many joins as possible to INNER, but don't make OUTER
        joins INNER if that could remove results from the query.
        """
        to_promote = set()
        to_demote = set()
        # The effective_connector is used so that NOT (a AND b) is treated
        # similarly to (a OR b) for join promotion.
        for table, votes in self.votes.items():
            # We must use outer joins in OR case when the join isn't contained
            # in all of the joins. Otherwise the INNER JOIN itself could remove
            # valid results. Consider the case where a model with rel_a and
            # rel_b relations is queried with rel_a__col=1 | rel_b__col=2. Now,
            # if the rel_a join doesn't produce any results (for example, a
            # reverse foreign key with no matching rows, or a null value in a
            # direct foreign key), and there is a matching row in rel_b with
            # col=2, then an INNER join to rel_a would remove a valid match
            # from the query. So, we need to promote any existing INNER to
            # LOUTER (it is possible this promotion in turn will be demoted
            # later on).
            if self.effective_connector == 'OR' and votes < self.num_children:
                to_promote.add(table)
            # If connector is AND and there is a filter that can match only
            # when there is a joinable row, then use INNER. For example, in
            # rel_a__col=1 & rel_b__col=2, if either of the rels produce NULL
            # as join output, then the col=1 or col=2 can't match (as
            # NULL=anything is always false).
            # For the OR case, if all children voted for a join to be inner,
            # then we can use INNER for the join. For example:
            #     (rel_a__col__icontains='Alex' | rel_a__col__icontains='Russell')
            # then if rel_a doesn't produce any rows, the whole condition
            # can't match. Hence we can safely use INNER join.
            if self.effective_connector == 'AND' or (
                    self.effective_connector == 'OR' and votes == self.num_children):
                to_demote.add(table)
            # Finally, what happens in cases where we have:
            #     (rel_a__col=1 | rel_b__col=2) & rel_a__col__gte=0
            # Now, we first generate the OR clause, and promote joins for it
            # in the first if branch above. Both rel_a and rel_b are promoted
            # to LOUTER joins. After that we do the AND case. The OR case
            # voted no inner joins but the rel_a__col__gte=0 votes inner join
            # for rel_a. We demote it back to INNER join (in AND case a single
            # vote is enough). The demotion is OK, if rel_a doesn't produce
            # rows, then the rel_a__col__gte=0 clause can't be true, and thus
            # the whole clause must be false. So, it is safe to use INNER
            # join.
            # Note that in this example we could just as well have the __gte
            # clause and the OR clause swapped. Or we could replace the __gte
            # clause with an OR clause containing rel_a__col=1 | rel_a__col=2,
            # and again we could safely demote to INNER.
        query.promote_joins(to_promote)
        query.demote_joins(to_demote)
        return to_demote
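
# Editorial usage sketch (not upstream code): for a filter such as
# Model.objects.filter(Q(rel_a__col=1) | Q(rel_b__col=2)), a JoinPromoter is
# created with connector=OR and num_children=2. Each child votes for the joins
# it uses, so the rel_a and rel_b joins each receive one vote out of two, and
# update_join_types() promotes both to LEFT OUTER joins.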
import datetime import uuid from functools import lru_cache from django.conf import settings from django.db.backends.base.operations import BaseDatabaseOperations from django.db.backends.utils import strip_quotes, truncate_name from django.db.models.expressions import Exists, ExpressionWrapper, RawSQL from django.db.models.sql.where import WhereNode from django.db.utils import DatabaseError from django.utils import timezone from django.utils.encoding import force_bytes, force_str from django.utils.functional import cached_property from django.utils.regex_helper import _lazy_re_compile from .base import Database from .utils import BulkInsertMapper, InsertVar, Oracle_datetime class DatabaseOperations(BaseDatabaseOperations): # Oracle uses NUMBER(5), NUMBER(11), and NUMBER(19) for integer fields. # SmallIntegerField uses NUMBER(11) instead of NUMBER(5), which is used by # SmallAutoField, to preserve backward compatibility. integer_field_ranges = { 'SmallIntegerField': (-99999999999, 99999999999), 'IntegerField': (-99999999999, 99999999999), 'BigIntegerField': (-9999999999999999999, 9999999999999999999), 'PositiveBigIntegerField': (0, 9999999999999999999), 'PositiveSmallIntegerField': (0, 99999999999), 'PositiveIntegerField': (0, 99999999999), 'SmallAutoField': (-99999, 99999), 'AutoField': (-99999999999, 99999999999), 'BigAutoField': (-9999999999999999999, 9999999999999999999), } set_operators = {**BaseDatabaseOperations.set_operators, 'difference': 'MINUS'} # TODO: colorize this SQL code with style.SQL_KEYWORD(), etc. _sequence_reset_sql = """ DECLARE table_value integer; seq_value integer; seq_name user_tab_identity_cols.sequence_name%%TYPE; BEGIN BEGIN SELECT sequence_name INTO seq_name FROM user_tab_identity_cols WHERE table_name = '%(table_name)s' AND column_name = '%(column_name)s'; EXCEPTION WHEN NO_DATA_FOUND THEN seq_name := '%(no_autofield_sequence_name)s'; END; SELECT NVL(MAX(%(column)s), 0) INTO table_value FROM %(table)s; SELECT NVL(last_number - cache_size, 0) INTO seq_value FROM user_sequences WHERE sequence_name = seq_name; WHILE table_value > seq_value LOOP EXECUTE IMMEDIATE 'SELECT "'||seq_name||'".nextval FROM DUAL' INTO seq_value; END LOOP; END; /""" # Oracle doesn't support string without precision; use the max string size. cast_char_field_without_max_length = 'NVARCHAR2(2000)' cast_data_types = { 'AutoField': 'NUMBER(11)', 'BigAutoField': 'NUMBER(19)', 'SmallAutoField': 'NUMBER(5)', 'TextField': cast_char_field_without_max_length, } def cache_key_culling_sql(self): return 'SELECT cache_key FROM %s ORDER BY cache_key OFFSET %%s ROWS FETCH FIRST 1 ROWS ONLY' def date_extract_sql(self, lookup_type, field_name): if lookup_type == 'week_day': # TO_CHAR(field, 'D') returns an integer from 1-7, where 1=Sunday. 
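            # Editorial note (not upstream code): Oracle's 'D' format element
            # is NLS_TERRITORY dependent; the 1=Sunday mapping above assumes a
            # territory such as AMERICA, matching Django's week_day lookup.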
return "TO_CHAR(%s, 'D')" % field_name elif lookup_type == 'iso_week_day': return "TO_CHAR(%s - 1, 'D')" % field_name elif lookup_type == 'week': # IW = ISO week number return "TO_CHAR(%s, 'IW')" % field_name elif lookup_type == 'quarter': return "TO_CHAR(%s, 'Q')" % field_name elif lookup_type == 'iso_year': return "TO_CHAR(%s, 'IYYY')" % field_name else: # https://docs.oracle.com/en/database/oracle/oracle-database/18/sqlrf/EXTRACT-datetime.html return "EXTRACT(%s FROM %s)" % (lookup_type.upper(), field_name) def date_trunc_sql(self, lookup_type, field_name): # https://docs.oracle.com/en/database/oracle/oracle-database/18/sqlrf/ROUND-and-TRUNC-Date-Functions.html if lookup_type in ('year', 'month'): return "TRUNC(%s, '%s')" % (field_name, lookup_type.upper()) elif lookup_type == 'quarter': return "TRUNC(%s, 'Q')" % field_name elif lookup_type == 'week': return "TRUNC(%s, 'IW')" % field_name else: return "TRUNC(%s)" % field_name # Oracle crashes with "ORA-03113: end-of-file on communication channel" # if the time zone name is passed in parameter. Use interpolation instead. # https://groups.google.com/forum/#!msg/django-developers/zwQju7hbG78/9l934yelwfsJ # This regexp matches all time zone names from the zoneinfo database. _tzname_re = _lazy_re_compile(r'^[\w/:+-]+$') def _prepare_tzname_delta(self, tzname): if '+' in tzname: return tzname[tzname.find('+'):] elif '-' in tzname: return tzname[tzname.find('-'):] return tzname def _convert_field_to_tz(self, field_name, tzname): if not settings.USE_TZ: return field_name if not self._tzname_re.match(tzname): raise ValueError("Invalid time zone name: %s" % tzname) # Convert from connection timezone to the local time, returning # TIMESTAMP WITH TIME ZONE and cast it back to TIMESTAMP to strip the # TIME ZONE details. if self.connection.timezone_name != tzname: return "CAST((FROM_TZ(%s, '%s') AT TIME ZONE '%s') AS TIMESTAMP)" % ( field_name, self.connection.timezone_name, self._prepare_tzname_delta(tzname), ) return field_name def datetime_cast_date_sql(self, field_name, tzname): field_name = self._convert_field_to_tz(field_name, tzname) return 'TRUNC(%s)' % field_name def datetime_cast_time_sql(self, field_name, tzname): # Since `TimeField` values are stored as TIMESTAMP where only the date # part is ignored, convert the field to the specified timezone. return self._convert_field_to_tz(field_name, tzname) def datetime_extract_sql(self, lookup_type, field_name, tzname): field_name = self._convert_field_to_tz(field_name, tzname) return self.date_extract_sql(lookup_type, field_name) def datetime_trunc_sql(self, lookup_type, field_name, tzname): field_name = self._convert_field_to_tz(field_name, tzname) # https://docs.oracle.com/en/database/oracle/oracle-database/18/sqlrf/ROUND-and-TRUNC-Date-Functions.html if lookup_type in ('year', 'month'): sql = "TRUNC(%s, '%s')" % (field_name, lookup_type.upper()) elif lookup_type == 'quarter': sql = "TRUNC(%s, 'Q')" % field_name elif lookup_type == 'week': sql = "TRUNC(%s, 'IW')" % field_name elif lookup_type == 'day': sql = "TRUNC(%s)" % field_name elif lookup_type == 'hour': sql = "TRUNC(%s, 'HH24')" % field_name elif lookup_type == 'minute': sql = "TRUNC(%s, 'MI')" % field_name else: sql = "CAST(%s AS DATE)" % field_name # Cast to DATE removes sub-second precision. return sql def time_trunc_sql(self, lookup_type, field_name): # The implementation is similar to `datetime_trunc_sql` as both # `DateTimeField` and `TimeField` are stored as TIMESTAMP where # the date part of the later is ignored. 
if lookup_type == 'hour': sql = "TRUNC(%s, 'HH24')" % field_name elif lookup_type == 'minute': sql = "TRUNC(%s, 'MI')" % field_name elif lookup_type == 'second': sql = "CAST(%s AS DATE)" % field_name # Cast to DATE removes sub-second precision. return sql def get_db_converters(self, expression): converters = super().get_db_converters(expression) internal_type = expression.output_field.get_internal_type() if internal_type == 'TextField': converters.append(self.convert_textfield_value) elif internal_type == 'BinaryField': converters.append(self.convert_binaryfield_value) elif internal_type in ['BooleanField', 'NullBooleanField']: converters.append(self.convert_booleanfield_value) elif internal_type == 'DateTimeField': if settings.USE_TZ: converters.append(self.convert_datetimefield_value) elif internal_type == 'DateField': converters.append(self.convert_datefield_value) elif internal_type == 'TimeField': converters.append(self.convert_timefield_value) elif internal_type == 'UUIDField': converters.append(self.convert_uuidfield_value) # Oracle stores empty strings as null. If the field accepts the empty # string, undo this to adhere to the Django convention of using # the empty string instead of null. if expression.field.empty_strings_allowed: converters.append( self.convert_empty_bytes if internal_type == 'BinaryField' else self.convert_empty_string ) return converters def convert_textfield_value(self, value, expression, connection): if isinstance(value, Database.LOB): value = value.read() return value def convert_binaryfield_value(self, value, expression, connection): if isinstance(value, Database.LOB): value = force_bytes(value.read()) return value def convert_booleanfield_value(self, value, expression, connection): if value in (0, 1): value = bool(value) return value # cx_Oracle always returns datetime.datetime objects for # DATE and TIMESTAMP columns, but Django wants to see a # python datetime.date, .time, or .datetime. def convert_datetimefield_value(self, value, expression, connection): if value is not None: value = timezone.make_aware(value, self.connection.timezone) return value def convert_datefield_value(self, value, expression, connection): if isinstance(value, Database.Timestamp): value = value.date() return value def convert_timefield_value(self, value, expression, connection): if isinstance(value, Database.Timestamp): value = value.time() return value def convert_uuidfield_value(self, value, expression, connection): if value is not None: value = uuid.UUID(value) return value @staticmethod def convert_empty_string(value, expression, connection): return '' if value is None else value @staticmethod def convert_empty_bytes(value, expression, connection): return b'' if value is None else value def deferrable_sql(self): return " DEFERRABLE INITIALLY DEFERRED" def fetch_returned_insert_columns(self, cursor, returning_params): for param in returning_params: value = param.get_value() if value is None or value == []: # cx_Oracle < 6.3 returns None, >= 6.3 returns empty list. raise DatabaseError( 'The database did not return a new row id. Probably ' '"ORA-1403: no data found" was raised internally but was ' 'hidden by the Oracle OCI library (see ' 'https://code.djangoproject.com/ticket/28859).' ) # cx_Oracle < 7 returns value, >= 7 returns list with single value. 
            yield value[0] if isinstance(value, list) else value

    def field_cast_sql(self, db_type, internal_type):
        if db_type and db_type.endswith('LOB'):
            return "DBMS_LOB.SUBSTR(%s)"
        else:
            return "%s"

    def no_limit_value(self):
        return None

    def limit_offset_sql(self, low_mark, high_mark):
        fetch, offset = self._get_limit_offset_params(low_mark, high_mark)
        return ' '.join(sql for sql in (
            ('OFFSET %d ROWS' % offset) if offset else None,
            ('FETCH FIRST %d ROWS ONLY' % fetch) if fetch else None,
        ) if sql)

    def last_executed_query(self, cursor, sql, params):
        # https://cx-oracle.readthedocs.io/en/latest/cursor.html#Cursor.statement
        # The DB API definition does not define this attribute.
        statement = cursor.statement
        # Unlike Psycopg's `query` and MySQLdb's `_executed`, cx_Oracle's
        # `statement` doesn't contain the query parameters. Substitute
        # parameters manually.
        if isinstance(params, (tuple, list)):
            for i, param in enumerate(params):
                statement = statement.replace(':arg%d' % i, force_str(param, errors='replace'))
        elif isinstance(params, dict):
            for key, param in params.items():
                statement = statement.replace(':%s' % key, force_str(param, errors='replace'))
        return statement

    def last_insert_id(self, cursor, table_name, pk_name):
        sq_name = self._get_sequence_name(cursor, strip_quotes(table_name), pk_name)
        # CURRVAL can't be executed on its own; it must be read via a query.
        cursor.execute('SELECT "%s".currval FROM dual' % sq_name)
        return cursor.fetchone()[0]

    def lookup_cast(self, lookup_type, internal_type=None):
        if lookup_type in ('iexact', 'icontains', 'istartswith', 'iendswith'):
            return "UPPER(%s)"
        return "%s"

    def max_in_list_size(self):
        return 1000

    def max_name_length(self):
        return 30

    def pk_default_value(self):
        return "NULL"

    def prep_for_iexact_query(self, x):
        return x

    def process_clob(self, value):
        if value is None:
            return ''
        return value.read()

    def quote_name(self, name):
        # SQL92 requires delimited (quoted) names to be case-sensitive. When
        # not quoted, Oracle has case-insensitive behavior for identifiers, but
        # always defaults to uppercase.
        # We simplify things by making Oracle identifiers always uppercase.
        if not name.startswith('"') and not name.endswith('"'):
            name = '"%s"' % truncate_name(name.upper(), self.max_name_length())
        # Oracle puts the query text into a (query % args) construct, so % signs
        # in names need to be escaped. The '%%' will be collapsed back to '%' at
        # that stage so we aren't really making the name longer here.
name = name.replace('%', '%%') return name.upper() def random_function_sql(self): return "DBMS_RANDOM.RANDOM" def regex_lookup(self, lookup_type): if lookup_type == 'regex': match_option = "'c'" else: match_option = "'i'" return 'REGEXP_LIKE(%%s, %%s, %s)' % match_option def return_insert_columns(self, fields): if not fields: return '', () field_names = [] params = [] for field in fields: field_names.append('%s.%s' % ( self.quote_name(field.model._meta.db_table), self.quote_name(field.column), )) params.append(InsertVar(field)) return 'RETURNING %s INTO %s' % ( ', '.join(field_names), ', '.join(['%s'] * len(params)), ), tuple(params) def __foreign_key_constraints(self, table_name, recursive): with self.connection.cursor() as cursor: if recursive: cursor.execute(""" SELECT user_tables.table_name, rcons.constraint_name FROM user_tables JOIN user_constraints cons ON (user_tables.table_name = cons.table_name AND cons.constraint_type = ANY('P', 'U')) LEFT JOIN user_constraints rcons ON (user_tables.table_name = rcons.table_name AND rcons.constraint_type = 'R') START WITH user_tables.table_name = UPPER(%s) CONNECT BY NOCYCLE PRIOR cons.constraint_name = rcons.r_constraint_name GROUP BY user_tables.table_name, rcons.constraint_name HAVING user_tables.table_name != UPPER(%s) ORDER BY MAX(level) DESC """, (table_name, table_name)) else: cursor.execute(""" SELECT cons.table_name, cons.constraint_name FROM user_constraints cons WHERE cons.constraint_type = 'R' AND cons.table_name = UPPER(%s) """, (table_name,)) return cursor.fetchall() @cached_property def _foreign_key_constraints(self): # 512 is large enough to fit the ~330 tables (as of this writing) in # Django's test suite. return lru_cache(maxsize=512)(self.__foreign_key_constraints) def sql_flush(self, style, tables, sequences, allow_cascade=False): if tables: truncated_tables = {table.upper() for table in tables} constraints = set() # Oracle's TRUNCATE CASCADE only works with ON DELETE CASCADE # foreign keys which Django doesn't define. Emulate the # PostgreSQL behavior which truncates all dependent tables by # manually retrieving all foreign key constraints and resolving # dependencies. for table in tables: for foreign_table, constraint in self._foreign_key_constraints(table, recursive=allow_cascade): if allow_cascade: truncated_tables.add(foreign_table) constraints.add((foreign_table, constraint)) sql = [ "%s %s %s %s %s %s %s %s;" % ( style.SQL_KEYWORD('ALTER'), style.SQL_KEYWORD('TABLE'), style.SQL_FIELD(self.quote_name(table)), style.SQL_KEYWORD('DISABLE'), style.SQL_KEYWORD('CONSTRAINT'), style.SQL_FIELD(self.quote_name(constraint)), style.SQL_KEYWORD('KEEP'), style.SQL_KEYWORD('INDEX'), ) for table, constraint in constraints ] + [ "%s %s %s;" % ( style.SQL_KEYWORD('TRUNCATE'), style.SQL_KEYWORD('TABLE'), style.SQL_FIELD(self.quote_name(table)), ) for table in truncated_tables ] + [ "%s %s %s %s %s %s;" % ( style.SQL_KEYWORD('ALTER'), style.SQL_KEYWORD('TABLE'), style.SQL_FIELD(self.quote_name(table)), style.SQL_KEYWORD('ENABLE'), style.SQL_KEYWORD('CONSTRAINT'), style.SQL_FIELD(self.quote_name(constraint)), ) for table, constraint in constraints ] # Since we've just deleted all the rows, running our sequence # ALTER code will reset the sequence to 0. 
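            # Editorial note (not upstream code): sequence_reset_by_name_sql()
            # below renders the _sequence_reset_sql PL/SQL block (defined at
            # the top of this class) once per sequence, stepping the sequence
            # forward with nextval until it catches up with MAX(column).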
sql.extend(self.sequence_reset_by_name_sql(style, sequences)) return sql else: return [] def sequence_reset_by_name_sql(self, style, sequences): sql = [] for sequence_info in sequences: no_autofield_sequence_name = self._get_no_autofield_sequence_name(sequence_info['table']) table = self.quote_name(sequence_info['table']) column = self.quote_name(sequence_info['column'] or 'id') query = self._sequence_reset_sql % { 'no_autofield_sequence_name': no_autofield_sequence_name, 'table': table, 'column': column, 'table_name': strip_quotes(table), 'column_name': strip_quotes(column), } sql.append(query) return sql def sequence_reset_sql(self, style, model_list): from django.db import models output = [] query = self._sequence_reset_sql for model in model_list: for f in model._meta.local_fields: if isinstance(f, models.AutoField): no_autofield_sequence_name = self._get_no_autofield_sequence_name(model._meta.db_table) table = self.quote_name(model._meta.db_table) column = self.quote_name(f.column) output.append(query % { 'no_autofield_sequence_name': no_autofield_sequence_name, 'table': table, 'column': column, 'table_name': strip_quotes(table), 'column_name': strip_quotes(column), }) # Only one AutoField is allowed per model, so don't # continue to loop break for f in model._meta.many_to_many: if not f.remote_field.through: no_autofield_sequence_name = self._get_no_autofield_sequence_name(f.m2m_db_table()) table = self.quote_name(f.m2m_db_table()) column = self.quote_name('id') output.append(query % { 'no_autofield_sequence_name': no_autofield_sequence_name, 'table': table, 'column': column, 'table_name': strip_quotes(table), 'column_name': 'ID', }) return output def start_transaction_sql(self): return '' def tablespace_sql(self, tablespace, inline=False): if inline: return "USING INDEX TABLESPACE %s" % self.quote_name(tablespace) else: return "TABLESPACE %s" % self.quote_name(tablespace) def adapt_datefield_value(self, value): """ Transform a date value to an object compatible with what is expected by the backend driver for date columns. The default implementation transforms the date to text, but that is not necessary for Oracle. """ return value def adapt_datetimefield_value(self, value): """ Transform a datetime value to an object compatible with what is expected by the backend driver for datetime columns. If naive datetime is passed assumes that is in UTC. Normally Django models.DateTimeField makes sure that if USE_TZ is True passed datetime is timezone aware. """ if value is None: return None # Expression values are adapted by the database. if hasattr(value, 'resolve_expression'): return value # cx_Oracle doesn't support tz-aware datetimes if timezone.is_aware(value): if settings.USE_TZ: value = timezone.make_naive(value, self.connection.timezone) else: raise ValueError("Oracle backend does not support timezone-aware datetimes when USE_TZ is False.") return Oracle_datetime.from_datetime(value) def adapt_timefield_value(self, value): if value is None: return None # Expression values are adapted by the database. 
if hasattr(value, 'resolve_expression'): return value if isinstance(value, str): return datetime.datetime.strptime(value, '%H:%M:%S') # Oracle doesn't support tz-aware times if timezone.is_aware(value): raise ValueError("Oracle backend does not support timezone-aware times.") return Oracle_datetime(1900, 1, 1, value.hour, value.minute, value.second, value.microsecond) def combine_expression(self, connector, sub_expressions): lhs, rhs = sub_expressions if connector == '%%': return 'MOD(%s)' % ','.join(sub_expressions) elif connector == '&': return 'BITAND(%s)' % ','.join(sub_expressions) elif connector == '|': return 'BITAND(-%(lhs)s-1,%(rhs)s)+%(lhs)s' % {'lhs': lhs, 'rhs': rhs} elif connector == '<<': return '(%(lhs)s * POWER(2, %(rhs)s))' % {'lhs': lhs, 'rhs': rhs} elif connector == '>>': return 'FLOOR(%(lhs)s / POWER(2, %(rhs)s))' % {'lhs': lhs, 'rhs': rhs} elif connector == '^': return 'POWER(%s)' % ','.join(sub_expressions) return super().combine_expression(connector, sub_expressions) def _get_no_autofield_sequence_name(self, table): """ Manually created sequence name to keep backward compatibility for AutoFields that aren't Oracle identity columns. """ name_length = self.max_name_length() - 3 return '%s_SQ' % truncate_name(strip_quotes(table), name_length).upper() def _get_sequence_name(self, cursor, table, pk_name): cursor.execute(""" SELECT sequence_name FROM user_tab_identity_cols WHERE table_name = UPPER(%s) AND column_name = UPPER(%s)""", [table, pk_name]) row = cursor.fetchone() return self._get_no_autofield_sequence_name(table) if row is None else row[0] def bulk_insert_sql(self, fields, placeholder_rows): query = [] for row in placeholder_rows: select = [] for i, placeholder in enumerate(row): # A model without any fields has fields=[None]. if fields[i]: internal_type = getattr(fields[i], 'target_field', fields[i]).get_internal_type() placeholder = BulkInsertMapper.types.get(internal_type, '%s') % placeholder # Add columns aliases to the first select to avoid "ORA-00918: # column ambiguously defined" when two or more columns in the # first select have the same value. if not query: placeholder = '%s col_%s' % (placeholder, i) select.append(placeholder) query.append('SELECT %s FROM DUAL' % ', '.join(select)) # Bulk insert to tables with Oracle identity columns causes Oracle to # add sequence.nextval to it. Sequence.nextval cannot be used with the # UNION operator. To prevent incorrect SQL, move UNION to a subquery. return 'SELECT * FROM (%s)' % ' UNION ALL '.join(query) def subtract_temporals(self, internal_type, lhs, rhs): if internal_type == 'DateField': lhs_sql, lhs_params = lhs rhs_sql, rhs_params = rhs params = (*lhs_params, *rhs_params) return "NUMTODSINTERVAL(TO_NUMBER(%s - %s), 'DAY')" % (lhs_sql, rhs_sql), params return super().subtract_temporals(internal_type, lhs, rhs) def bulk_batch_size(self, fields, objs): """Oracle restricts the number of parameters in a query.""" if fields: return self.connection.features.max_query_params // len(fields) return len(objs) def conditional_expression_supported_in_where_clause(self, expression): """ Oracle supports only EXISTS(...) or filters in the WHERE clause, others must be compared with True. """ if isinstance(expression, (Exists, WhereNode)): return True if isinstance(expression, ExpressionWrapper) and expression.conditional: return self.conditional_expression_supported_in_where_clause(expression.expression) if isinstance(expression, RawSQL) and expression.conditional: return True return False
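
# Editorial sketch (not upstream code): combine_expression() above maps
# operators that Oracle lacks onto function calls, e.g. a queryset annotation
# using F('a') ^ F('b') is rendered as POWER(a, b) and F('a') % F('b') as
# MOD(a, b), while '&' and '|' are emulated with BITAND().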
import datetime import decimal from importlib import import_module import sqlparse from django.conf import settings from django.db import NotSupportedError, transaction from django.db.backends import utils from django.utils import timezone from django.utils.encoding import force_str class BaseDatabaseOperations: """ Encapsulate backend-specific differences, such as the way a backend performs ordering or calculates the ID of a recently-inserted row. """ compiler_module = "django.db.models.sql.compiler" # Integer field safe ranges by `internal_type` as documented # in docs/ref/models/fields.txt. integer_field_ranges = { 'SmallIntegerField': (-32768, 32767), 'IntegerField': (-2147483648, 2147483647), 'BigIntegerField': (-9223372036854775808, 9223372036854775807), 'PositiveBigIntegerField': (0, 9223372036854775807), 'PositiveSmallIntegerField': (0, 32767), 'PositiveIntegerField': (0, 2147483647), 'SmallAutoField': (-32768, 32767), 'AutoField': (-2147483648, 2147483647), 'BigAutoField': (-9223372036854775808, 9223372036854775807), } set_operators = { 'union': 'UNION', 'intersection': 'INTERSECT', 'difference': 'EXCEPT', } # Mapping of Field.get_internal_type() (typically the model field's class # name) to the data type to use for the Cast() function, if different from # DatabaseWrapper.data_types. cast_data_types = {} # CharField data type if the max_length argument isn't provided. cast_char_field_without_max_length = None # Start and end points for window expressions. PRECEDING = 'PRECEDING' FOLLOWING = 'FOLLOWING' UNBOUNDED_PRECEDING = 'UNBOUNDED ' + PRECEDING UNBOUNDED_FOLLOWING = 'UNBOUNDED ' + FOLLOWING CURRENT_ROW = 'CURRENT ROW' # Prefix for EXPLAIN queries, or None EXPLAIN isn't supported. explain_prefix = None def __init__(self, connection): self.connection = connection self._cache = None def autoinc_sql(self, table, column): """ Return any SQL needed to support auto-incrementing primary keys, or None if no SQL is necessary. This SQL is executed when a table is created. """ return None def bulk_batch_size(self, fields, objs): """ Return the maximum allowed batch size for the backend. The fields are the fields going to be inserted in the batch, the objs contains all the objects to be inserted. """ return len(objs) def cache_key_culling_sql(self): """ Return an SQL query that retrieves the first cache key greater than the n smallest. This is used by the 'db' cache backend to determine where to start culling. """ return "SELECT cache_key FROM %s ORDER BY cache_key LIMIT 1 OFFSET %%s" def unification_cast_sql(self, output_field): """ Given a field instance, return the SQL that casts the result of a union to that type. The resulting string should contain a '%s' placeholder for the expression being cast. """ return '%s' def date_extract_sql(self, lookup_type, field_name): """ Given a lookup_type of 'year', 'month', or 'day', return the SQL that extracts a value from the given date field field_name. """ raise NotImplementedError('subclasses of BaseDatabaseOperations may require a date_extract_sql() method') def date_interval_sql(self, timedelta): """ Implement the date interval functionality for expressions. """ raise NotImplementedError('subclasses of BaseDatabaseOperations may require a date_interval_sql() method') def date_trunc_sql(self, lookup_type, field_name): """ Given a lookup_type of 'year', 'month', or 'day', return the SQL that truncates the given date field field_name to a date object with only the given specificity. 
""" raise NotImplementedError('subclasses of BaseDatabaseOperations may require a date_trunc_sql() method.') def datetime_cast_date_sql(self, field_name, tzname): """ Return the SQL to cast a datetime value to date value. """ raise NotImplementedError( 'subclasses of BaseDatabaseOperations may require a ' 'datetime_cast_date_sql() method.' ) def datetime_cast_time_sql(self, field_name, tzname): """ Return the SQL to cast a datetime value to time value. """ raise NotImplementedError('subclasses of BaseDatabaseOperations may require a datetime_cast_time_sql() method') def datetime_extract_sql(self, lookup_type, field_name, tzname): """ Given a lookup_type of 'year', 'month', 'day', 'hour', 'minute', or 'second', return the SQL that extracts a value from the given datetime field field_name. """ raise NotImplementedError('subclasses of BaseDatabaseOperations may require a datetime_extract_sql() method') def datetime_trunc_sql(self, lookup_type, field_name, tzname): """ Given a lookup_type of 'year', 'month', 'day', 'hour', 'minute', or 'second', return the SQL that truncates the given datetime field field_name to a datetime object with only the given specificity. """ raise NotImplementedError('subclasses of BaseDatabaseOperations may require a datetime_trunc_sql() method') def time_trunc_sql(self, lookup_type, field_name): """ Given a lookup_type of 'hour', 'minute' or 'second', return the SQL that truncates the given time field field_name to a time object with only the given specificity. """ raise NotImplementedError('subclasses of BaseDatabaseOperations may require a time_trunc_sql() method') def time_extract_sql(self, lookup_type, field_name): """ Given a lookup_type of 'hour', 'minute', or 'second', return the SQL that extracts a value from the given time field field_name. """ return self.date_extract_sql(lookup_type, field_name) def deferrable_sql(self): """ Return the SQL to make a constraint "initially deferred" during a CREATE TABLE statement. """ return '' def distinct_sql(self, fields, params): """ Return an SQL DISTINCT clause which removes duplicate rows from the result set. If any fields are given, only check the given fields for duplicates. """ if fields: raise NotSupportedError('DISTINCT ON fields is not supported by this database backend') else: return ['DISTINCT'], [] def fetch_returned_insert_columns(self, cursor, returning_params): """ Given a cursor object that has just performed an INSERT...RETURNING statement into a table, return the newly created data. """ return cursor.fetchone() def field_cast_sql(self, db_type, internal_type): """ Given a column type (e.g. 'BLOB', 'VARCHAR') and an internal type (e.g. 'GenericIPAddressField'), return the SQL to cast it before using it in a WHERE statement. The resulting string should contain a '%s' placeholder for the column being searched against. """ return '%s' def force_no_ordering(self): """ Return a list used in the "ORDER BY" clause to force no ordering at all. Return an empty list to include nothing in the ordering. """ return [] def for_update_sql(self, nowait=False, skip_locked=False, of=()): """ Return the FOR UPDATE SQL clause to lock rows for an update operation. 
""" return 'FOR UPDATE%s%s%s' % ( ' OF %s' % ', '.join(of) if of else '', ' NOWAIT' if nowait else '', ' SKIP LOCKED' if skip_locked else '', ) def _get_limit_offset_params(self, low_mark, high_mark): offset = low_mark or 0 if high_mark is not None: return (high_mark - offset), offset elif offset: return self.connection.ops.no_limit_value(), offset return None, offset def limit_offset_sql(self, low_mark, high_mark): """Return LIMIT/OFFSET SQL clause.""" limit, offset = self._get_limit_offset_params(low_mark, high_mark) return ' '.join(sql for sql in ( ('LIMIT %d' % limit) if limit else None, ('OFFSET %d' % offset) if offset else None, ) if sql) def last_executed_query(self, cursor, sql, params): """ Return a string of the query last executed by the given cursor, with placeholders replaced with actual values. `sql` is the raw query containing placeholders and `params` is the sequence of parameters. These are used by default, but this method exists for database backends to provide a better implementation according to their own quoting schemes. """ # Convert params to contain string values. def to_string(s): return force_str(s, strings_only=True, errors='replace') if isinstance(params, (list, tuple)): u_params = tuple(to_string(val) for val in params) elif params is None: u_params = () else: u_params = {to_string(k): to_string(v) for k, v in params.items()} return "QUERY = %r - PARAMS = %r" % (sql, u_params) def last_insert_id(self, cursor, table_name, pk_name): """ Given a cursor object that has just performed an INSERT statement into a table that has an auto-incrementing ID, return the newly created ID. `pk_name` is the name of the primary-key column. """ return cursor.lastrowid def lookup_cast(self, lookup_type, internal_type=None): """ Return the string to use in a query when performing lookups ("contains", "like", etc.). It should contain a '%s' placeholder for the column being searched against. """ return "%s" def max_in_list_size(self): """ Return the maximum number of items that can be passed in a single 'IN' list condition, or None if the backend does not impose a limit. """ return None def max_name_length(self): """ Return the maximum length of table and column names, or None if there is no limit. """ return None def no_limit_value(self): """ Return the value to use for the LIMIT when we are wanting "LIMIT infinity". Return None if the limit clause can be omitted in this case. """ raise NotImplementedError('subclasses of BaseDatabaseOperations may require a no_limit_value() method') def pk_default_value(self): """ Return the value to use during an INSERT statement to specify that the field should use its default value. """ return 'DEFAULT' def prepare_sql_script(self, sql): """ Take an SQL script that may contain multiple lines and return a list of statements to feed to successive cursor.execute() calls. Since few databases are able to process raw SQL scripts in a single cursor.execute() call and PEP 249 doesn't talk about this use case, the default implementation is conservative. """ return [ sqlparse.format(statement, strip_comments=True) for statement in sqlparse.split(sql) if statement ] def process_clob(self, value): """ Return the value of a CLOB column, for backends that return a locator object that requires additional processing. """ return value def return_insert_columns(self, fields): """ For backends that support returning columns as part of an insert query, return the SQL and params to append to the INSERT query. 
        The returned fragment should contain a format string to hold the
        appropriate column.
        """
        pass

    def compiler(self, compiler_name):
        """
        Return the SQLCompiler class corresponding to the given name,
        in the namespace corresponding to the `compiler_module` attribute
        on this backend.
        """
        if self._cache is None:
            self._cache = import_module(self.compiler_module)
        return getattr(self._cache, compiler_name)

    def quote_name(self, name):
        """
        Return a quoted version of the given table, index, or column name. Do
        not quote the given name if it's already been quoted.
        """
        raise NotImplementedError('subclasses of BaseDatabaseOperations may require a quote_name() method.')

    def random_function_sql(self):
        """Return an SQL expression that returns a random value."""
        return 'RANDOM()'

    def regex_lookup(self, lookup_type):
        """
        Return the string to use in a query when performing regular expression
        lookups (using "regex" or "iregex"). It should contain a '%s'
        placeholder for the column being searched against.

        If the feature is not supported (or part of it is not supported),
        raise NotImplementedError.
        """
        raise NotImplementedError('subclasses of BaseDatabaseOperations may require a regex_lookup() method.')

    def savepoint_create_sql(self, sid):
        """
        Return the SQL for starting a new savepoint. Only required if the
        "uses_savepoints" feature is True. The "sid" parameter is a string
        for the savepoint id.
        """
        return "SAVEPOINT %s" % self.quote_name(sid)

    def savepoint_commit_sql(self, sid):
        """
        Return the SQL for committing the given savepoint.
        """
        return "RELEASE SAVEPOINT %s" % self.quote_name(sid)

    def savepoint_rollback_sql(self, sid):
        """
        Return the SQL for rolling back the given savepoint.
        """
        return "ROLLBACK TO SAVEPOINT %s" % self.quote_name(sid)

    def set_time_zone_sql(self):
        """
        Return the SQL that will set the connection's time zone.

        Return '' if the backend doesn't support time zones.
        """
        return ''

    def sql_flush(self, style, tables, sequences, allow_cascade=False):
        """
        Return a list of SQL statements required to remove all data from
        the given database tables (without actually removing the tables
        themselves) and the SQL statements required to reset the sequences
        passed in `sequences`.

        The `style` argument is a Style object as returned by either
        color_style() or no_style() in django.core.management.color.

        The `allow_cascade` argument determines whether truncation may cascade
        to tables with foreign keys pointing to the tables being truncated.
        PostgreSQL requires a cascade even if these tables are empty.
        """
        raise NotImplementedError('subclasses of BaseDatabaseOperations must provide a sql_flush() method.')

    def execute_sql_flush(self, using, sql_list):
        """Execute a list of SQL statements to flush the database."""
        with transaction.atomic(using=using, savepoint=self.connection.features.can_rollback_ddl):
            with self.connection.cursor() as cursor:
                for sql in sql_list:
                    cursor.execute(sql)

    def sequence_reset_by_name_sql(self, style, sequences):
        """
        Return a list of the SQL statements required to reset sequences
        passed in `sequences`.

        The `style` argument is a Style object as returned by either
        color_style() or no_style() in django.core.management.color.
        """
        return []

    def sequence_reset_sql(self, style, model_list):
        """
        Return a list of the SQL statements required to reset sequences for
        the given models.

        The `style` argument is a Style object as returned by either
        color_style() or no_style() in django.core.management.color.
        """
        return []  # No sequence reset required by default.
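    # --- Editorial sketch (not part of the original source) ---------------
    # The three savepoint methods above compose into the round-trip that
    # django.db.transaction.atomic() drives for nested atomic blocks when the
    # backend's uses_savepoints feature is True. With a backend whose
    # quote_name() double-quotes identifiers (e.g. PostgreSQL):
    #
    #     ops.savepoint_create_sql('s1')    # -> 'SAVEPOINT "s1"'
    #     ops.savepoint_rollback_sql('s1')  # -> 'ROLLBACK TO SAVEPOINT "s1"'
    #     ops.savepoint_commit_sql('s1')    # -> 'RELEASE SAVEPOINT "s1"'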
    def start_transaction_sql(self):
        """Return the SQL statement required to start a transaction."""
        return "BEGIN;"

    def end_transaction_sql(self, success=True):
        """Return the SQL statement required to end a transaction."""
        if not success:
            return "ROLLBACK;"
        return "COMMIT;"

    def tablespace_sql(self, tablespace, inline=False):
        """
        Return the SQL that will be used in a query to define the tablespace.

        Return '' if the backend doesn't support tablespaces.

        If `inline` is True, append the SQL to a row; otherwise append it to
        the entire CREATE TABLE or CREATE INDEX statement.
        """
        return ''

    def prep_for_like_query(self, x):
        """Prepare a value for use in a LIKE query."""
        return str(x).replace("\\", "\\\\").replace("%", r"\%").replace("_", r"\_")

    # Same as prep_for_like_query(), but called for "iexact" matches, which
    # need not necessarily be implemented using "LIKE" in the backend.
    prep_for_iexact_query = prep_for_like_query

    def validate_autopk_value(self, value):
        """
        Certain backends do not accept some values for "serial" fields
        (for example zero in MySQL). Raise a ValueError if the value is
        invalid, otherwise return the validated value.
        """
        return value

    def adapt_unknown_value(self, value):
        """
        Transform a value to something compatible with the backend driver.

        This method only depends on the type of the value. It's designed for
        cases where the target type isn't known, such as .raw() SQL queries.
        As a consequence it may not work perfectly in all circumstances.
        """
        if isinstance(value, datetime.datetime):  # must be checked before date, as datetime subclasses date
            return self.adapt_datetimefield_value(value)
        elif isinstance(value, datetime.date):
            return self.adapt_datefield_value(value)
        elif isinstance(value, datetime.time):
            return self.adapt_timefield_value(value)
        elif isinstance(value, decimal.Decimal):
            return self.adapt_decimalfield_value(value)
        else:
            return value

    def adapt_datefield_value(self, value):
        """
        Transform a date value to an object compatible with what is expected
        by the backend driver for date columns.
        """
        if value is None:
            return None
        return str(value)

    def adapt_datetimefield_value(self, value):
        """
        Transform a datetime value to an object compatible with what is
        expected by the backend driver for datetime columns.
        """
        if value is None:
            return None
        return str(value)

    def adapt_timefield_value(self, value):
        """
        Transform a time value to an object compatible with what is expected
        by the backend driver for time columns.
        """
        if value is None:
            return None
        if timezone.is_aware(value):
            raise ValueError("Django does not support timezone-aware times.")
        return str(value)

    def adapt_decimalfield_value(self, value, max_digits=None, decimal_places=None):
        """
        Transform a decimal.Decimal value to an object compatible with what is
        expected by the backend driver for decimal (numeric) columns.
        """
        return utils.format_number(value, max_digits, decimal_places)

    def adapt_ipaddressfield_value(self, value):
        """
        Transform a string representation of an IP address into the expected
        type for the backend driver.
        """
        return value or None

    def year_lookup_bounds_for_date_field(self, value):
        """
        Return a two-element list with the lower and upper bound to be used
        with a BETWEEN operator to query a DateField value using a year
        lookup.

        `value` is an int, containing the looked-up year.
""" first = datetime.date(value, 1, 1) second = datetime.date(value, 12, 31) first = self.adapt_datefield_value(first) second = self.adapt_datefield_value(second) return [first, second] def year_lookup_bounds_for_datetime_field(self, value): """ Return a two-elements list with the lower and upper bound to be used with a BETWEEN operator to query a DateTimeField value using a year lookup. `value` is an int, containing the looked-up year. """ first = datetime.datetime(value, 1, 1) second = datetime.datetime(value, 12, 31, 23, 59, 59, 999999) if settings.USE_TZ: tz = timezone.get_current_timezone() first = timezone.make_aware(first, tz) second = timezone.make_aware(second, tz) first = self.adapt_datetimefield_value(first) second = self.adapt_datetimefield_value(second) return [first, second] def get_db_converters(self, expression): """ Return a list of functions needed to convert field data. Some field types on some backends do not provide data in the correct format, this is the hook for converter functions. """ return [] def convert_durationfield_value(self, value, expression, connection): if value is not None: return datetime.timedelta(0, 0, value) def check_expression_support(self, expression): """ Check that the backend supports the provided expression. This is used on specific backends to rule out known expressions that have problematic or nonexistent implementations. If the expression has a known problem, the backend should raise NotSupportedError. """ pass def conditional_expression_supported_in_where_clause(self, expression): """ Return True, if the conditional expression is supported in the WHERE clause. """ return True def combine_expression(self, connector, sub_expressions): """ Combine a list of subexpressions into a single expression, using the provided connecting operator. This is required because operators can vary between backends (e.g., Oracle with %% and &) and between subexpression types (e.g., date expressions). """ conn = ' %s ' % connector return conn.join(sub_expressions) def combine_duration_expression(self, connector, sub_expressions): return self.combine_expression(connector, sub_expressions) def binary_placeholder_sql(self, value): """ Some backends require special syntax to insert binary content (MySQL for example uses '_binary %s'). """ return '%s' def modify_insert_params(self, placeholder, params): """ Allow modification of insert parameters. Needed for Oracle Spatial backend due to #10888. """ return params def integer_field_range(self, internal_type): """ Given an integer field internal type (e.g. 'PositiveIntegerField'), return a tuple of the (min_value, max_value) form representing the range of the column type bound to the field. """ return self.integer_field_ranges[internal_type] def subtract_temporals(self, internal_type, lhs, rhs): if self.connection.features.supports_temporal_subtraction: lhs_sql, lhs_params = lhs rhs_sql, rhs_params = rhs return '(%s - %s)' % (lhs_sql, rhs_sql), (*lhs_params, *rhs_params) raise NotSupportedError("This backend does not support %s subtraction." % internal_type) def window_frame_start(self, start): if isinstance(start, int): if start < 0: return '%d %s' % (abs(start), self.PRECEDING) elif start == 0: return self.CURRENT_ROW elif start is None: return self.UNBOUNDED_PRECEDING raise ValueError("start argument must be a negative integer, zero, or None, but got '%s'." 
% start) def window_frame_end(self, end): if isinstance(end, int): if end == 0: return self.CURRENT_ROW elif end > 0: return '%d %s' % (end, self.FOLLOWING) elif end is None: return self.UNBOUNDED_FOLLOWING raise ValueError("end argument must be a positive integer, zero, or None, but got '%s'." % end) def window_frame_rows_start_end(self, start=None, end=None): """ Return SQL for start and end points in an OVER clause window frame. """ if not self.connection.features.supports_over_clause: raise NotSupportedError('This backend does not support window expressions.') return self.window_frame_start(start), self.window_frame_end(end) def window_frame_range_start_end(self, start=None, end=None): return self.window_frame_rows_start_end(start, end) def explain_query_prefix(self, format=None, **options): if not self.connection.features.supports_explaining_query_execution: raise NotSupportedError('This backend does not support explaining query execution.') if format: supported_formats = self.connection.features.supported_explain_formats normalized_format = format.upper() if normalized_format not in supported_formats: msg = '%s is not a recognized format.' % normalized_format if supported_formats: msg += ' Allowed formats: %s' % ', '.join(sorted(supported_formats)) raise ValueError(msg) if options: raise ValueError('Unknown options: %s' % ', '.join(sorted(options.keys()))) return self.explain_prefix def insert_statement(self, ignore_conflicts=False): return 'INSERT INTO' def ignore_conflicts_suffix_sql(self, ignore_conflicts=None): return ''
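# --- Editorial sketch (not part of the original source) -------------------
# A minimal demonstration of how QuerySet slice marks map onto SQL via
# _get_limit_offset_params() and limit_offset_sql() above, plus the window
# frame helpers. Passing connection=None is safe here because none of these
# calls touch the connection for a bounded slice.
if __name__ == '__main__':
    ops = BaseDatabaseOperations(connection=None)
    # qs[5:15] stores low_mark=5, high_mark=15; LIMIT is the slice length.
    assert ops._get_limit_offset_params(5, 15) == (10, 5)
    assert ops.limit_offset_sql(5, 15) == 'LIMIT 10 OFFSET 5'
    # With no offset, only the LIMIT clause is emitted.
    assert ops.limit_offset_sql(0, 10) == 'LIMIT 10'
    # Window frame bounds: a negative start means "N PRECEDING".
    assert ops.window_frame_start(-3) == '3 PRECEDING'
    assert ops.window_frame_end(0) == ops.CURRENT_ROW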
# django/db/backends/mysql/operations.py
import uuid from django.conf import settings from django.db.backends.base.operations import BaseDatabaseOperations from django.utils import timezone from django.utils.duration import duration_microseconds from django.utils.encoding import force_str class DatabaseOperations(BaseDatabaseOperations): compiler_module = "django.db.backends.mysql.compiler" # MySQL stores positive fields as UNSIGNED ints. integer_field_ranges = { **BaseDatabaseOperations.integer_field_ranges, 'PositiveSmallIntegerField': (0, 65535), 'PositiveIntegerField': (0, 4294967295), 'PositiveBigIntegerField': (0, 18446744073709551615), } cast_data_types = { 'AutoField': 'signed integer', 'BigAutoField': 'signed integer', 'SmallAutoField': 'signed integer', 'CharField': 'char(%(max_length)s)', 'DecimalField': 'decimal(%(max_digits)s, %(decimal_places)s)', 'TextField': 'char', 'IntegerField': 'signed integer', 'BigIntegerField': 'signed integer', 'SmallIntegerField': 'signed integer', 'PositiveBigIntegerField': 'unsigned integer', 'PositiveIntegerField': 'unsigned integer', 'PositiveSmallIntegerField': 'unsigned integer', } cast_char_field_without_max_length = 'char' explain_prefix = 'EXPLAIN' def date_extract_sql(self, lookup_type, field_name): # https://dev.mysql.com/doc/mysql/en/date-and-time-functions.html if lookup_type == 'week_day': # DAYOFWEEK() returns an integer, 1-7, Sunday=1. return "DAYOFWEEK(%s)" % field_name elif lookup_type == 'iso_week_day': # WEEKDAY() returns an integer, 0-6, Monday=0. return "WEEKDAY(%s) + 1" % field_name elif lookup_type == 'week': # Override the value of default_week_format for consistency with # other database backends. # Mode 3: Monday, 1-53, with 4 or more days this year. return "WEEK(%s, 3)" % field_name elif lookup_type == 'iso_year': # Get the year part from the YEARWEEK function, which returns a # number as year * 100 + week. return "TRUNCATE(YEARWEEK(%s, 3), -2) / 100" % field_name else: # EXTRACT returns 1-53 based on ISO-8601 for the week number. return "EXTRACT(%s FROM %s)" % (lookup_type.upper(), field_name) def date_trunc_sql(self, lookup_type, field_name): fields = { 'year': '%%Y-01-01', 'month': '%%Y-%%m-01', } # Use double percents to escape. 
if lookup_type in fields: format_str = fields[lookup_type] return "CAST(DATE_FORMAT(%s, '%s') AS DATE)" % (field_name, format_str) elif lookup_type == 'quarter': return "MAKEDATE(YEAR(%s), 1) + INTERVAL QUARTER(%s) QUARTER - INTERVAL 1 QUARTER" % ( field_name, field_name ) elif lookup_type == 'week': return "DATE_SUB(%s, INTERVAL WEEKDAY(%s) DAY)" % ( field_name, field_name ) else: return "DATE(%s)" % (field_name) def _prepare_tzname_delta(self, tzname): if '+' in tzname: return tzname[tzname.find('+'):] elif '-' in tzname: return tzname[tzname.find('-'):] return tzname def _convert_field_to_tz(self, field_name, tzname): if settings.USE_TZ and self.connection.timezone_name != tzname: field_name = "CONVERT_TZ(%s, '%s', '%s')" % ( field_name, self.connection.timezone_name, self._prepare_tzname_delta(tzname), ) return field_name def datetime_cast_date_sql(self, field_name, tzname): field_name = self._convert_field_to_tz(field_name, tzname) return "DATE(%s)" % field_name def datetime_cast_time_sql(self, field_name, tzname): field_name = self._convert_field_to_tz(field_name, tzname) return "TIME(%s)" % field_name def datetime_extract_sql(self, lookup_type, field_name, tzname): field_name = self._convert_field_to_tz(field_name, tzname) return self.date_extract_sql(lookup_type, field_name) def datetime_trunc_sql(self, lookup_type, field_name, tzname): field_name = self._convert_field_to_tz(field_name, tzname) fields = ['year', 'month', 'day', 'hour', 'minute', 'second'] format = ('%%Y-', '%%m', '-%%d', ' %%H:', '%%i', ':%%s') # Use double percents to escape. format_def = ('0000-', '01', '-01', ' 00:', '00', ':00') if lookup_type == 'quarter': return ( "CAST(DATE_FORMAT(MAKEDATE(YEAR({field_name}), 1) + " "INTERVAL QUARTER({field_name}) QUARTER - " + "INTERVAL 1 QUARTER, '%%Y-%%m-01 00:00:00') AS DATETIME)" ).format(field_name=field_name) if lookup_type == 'week': return ( "CAST(DATE_FORMAT(DATE_SUB({field_name}, " "INTERVAL WEEKDAY({field_name}) DAY), " "'%%Y-%%m-%%d 00:00:00') AS DATETIME)" ).format(field_name=field_name) try: i = fields.index(lookup_type) + 1 except ValueError: sql = field_name else: format_str = ''.join(format[:i] + format_def[i:]) sql = "CAST(DATE_FORMAT(%s, '%s') AS DATETIME)" % (field_name, format_str) return sql def time_trunc_sql(self, lookup_type, field_name): fields = { 'hour': '%%H:00:00', 'minute': '%%H:%%i:00', 'second': '%%H:%%i:%%s', } # Use double percents to escape. if lookup_type in fields: format_str = fields[lookup_type] return "CAST(DATE_FORMAT(%s, '%s') AS TIME)" % (field_name, format_str) else: return "TIME(%s)" % (field_name) def date_interval_sql(self, timedelta): return 'INTERVAL %s MICROSECOND' % duration_microseconds(timedelta) def format_for_duration_arithmetic(self, sql): return 'INTERVAL %s MICROSECOND' % sql def force_no_ordering(self): """ "ORDER BY NULL" prevents MySQL from implicitly ordering by grouped columns. If no ordering would otherwise be applied, we don't want any implicit sorting going on. """ return [(None, ("NULL", [], False))] def last_executed_query(self, cursor, sql, params): # With MySQLdb, cursor objects have an (undocumented) "_executed" # attribute where the exact query sent to the database is saved. # See MySQLdb/cursors.py in the source distribution. # MySQLdb returns string, PyMySQL bytes. 
return force_str(getattr(cursor, '_executed', None), errors='replace') def no_limit_value(self): # 2**64 - 1, as recommended by the MySQL documentation return 18446744073709551615 def quote_name(self, name): if name.startswith("`") and name.endswith("`"): return name # Quoting once is enough. return "`%s`" % name def random_function_sql(self): return 'RAND()' def sql_flush(self, style, tables, sequences, allow_cascade=False): # NB: The generated SQL below is specific to MySQL # 'TRUNCATE x;', 'TRUNCATE y;', 'TRUNCATE z;'... style SQL statements # to clear all tables of all data if tables: sql = ['SET FOREIGN_KEY_CHECKS = 0;'] for table in tables: sql.append('%s %s;' % ( style.SQL_KEYWORD('TRUNCATE'), style.SQL_FIELD(self.quote_name(table)), )) sql.append('SET FOREIGN_KEY_CHECKS = 1;') sql.extend(self.sequence_reset_by_name_sql(style, sequences)) return sql else: return [] def validate_autopk_value(self, value): # MySQLism: zero in AUTO_INCREMENT field does not work. Refs #17653. if value == 0: raise ValueError('The database backend does not accept 0 as a ' 'value for AutoField.') return value def adapt_datetimefield_value(self, value): if value is None: return None # Expression values are adapted by the database. if hasattr(value, 'resolve_expression'): return value # MySQL doesn't support tz-aware datetimes if timezone.is_aware(value): if settings.USE_TZ: value = timezone.make_naive(value, self.connection.timezone) else: raise ValueError("MySQL backend does not support timezone-aware datetimes when USE_TZ is False.") return str(value) def adapt_timefield_value(self, value): if value is None: return None # Expression values are adapted by the database. if hasattr(value, 'resolve_expression'): return value # MySQL doesn't support tz-aware times if timezone.is_aware(value): raise ValueError("MySQL backend does not support timezone-aware times.") return str(value) def max_name_length(self): return 64 def bulk_insert_sql(self, fields, placeholder_rows): placeholder_rows_sql = (", ".join(row) for row in placeholder_rows) values_sql = ", ".join("(%s)" % sql for sql in placeholder_rows_sql) return "VALUES " + values_sql def combine_expression(self, connector, sub_expressions): if connector == '^': return 'POW(%s)' % ','.join(sub_expressions) # Convert the result to a signed integer since MySQL's binary operators # return an unsigned integer. 
elif connector in ('&', '|', '<<'): return 'CONVERT(%s, SIGNED)' % connector.join(sub_expressions) elif connector == '>>': lhs, rhs = sub_expressions return 'FLOOR(%(lhs)s / POW(2, %(rhs)s))' % {'lhs': lhs, 'rhs': rhs} return super().combine_expression(connector, sub_expressions) def get_db_converters(self, expression): converters = super().get_db_converters(expression) internal_type = expression.output_field.get_internal_type() if internal_type in ['BooleanField', 'NullBooleanField']: converters.append(self.convert_booleanfield_value) elif internal_type == 'DateTimeField': if settings.USE_TZ: converters.append(self.convert_datetimefield_value) elif internal_type == 'UUIDField': converters.append(self.convert_uuidfield_value) return converters def convert_booleanfield_value(self, value, expression, connection): if value in (0, 1): value = bool(value) return value def convert_datetimefield_value(self, value, expression, connection): if value is not None: value = timezone.make_aware(value, self.connection.timezone) return value def convert_uuidfield_value(self, value, expression, connection): if value is not None: value = uuid.UUID(value) return value def binary_placeholder_sql(self, value): return '_binary %s' if value is not None and not hasattr(value, 'as_sql') else '%s' def subtract_temporals(self, internal_type, lhs, rhs): lhs_sql, lhs_params = lhs rhs_sql, rhs_params = rhs if internal_type == 'TimeField': if self.connection.mysql_is_mariadb: # MariaDB includes the microsecond component in TIME_TO_SEC as # a decimal. MySQL returns an integer without microseconds. return 'CAST((TIME_TO_SEC(%(lhs)s) - TIME_TO_SEC(%(rhs)s)) * 1000000 AS SIGNED)' % { 'lhs': lhs_sql, 'rhs': rhs_sql }, (*lhs_params, *rhs_params) return ( "((TIME_TO_SEC(%(lhs)s) * 1000000 + MICROSECOND(%(lhs)s)) -" " (TIME_TO_SEC(%(rhs)s) * 1000000 + MICROSECOND(%(rhs)s)))" ) % {'lhs': lhs_sql, 'rhs': rhs_sql}, tuple(lhs_params) * 2 + tuple(rhs_params) * 2 params = (*lhs_params, *rhs_params) return "TIMESTAMPDIFF(MICROSECOND, %s, %s)" % (rhs_sql, lhs_sql), params def explain_query_prefix(self, format=None, **options): # Alias MySQL's TRADITIONAL to TEXT for consistency with other backends. if format and format.upper() == 'TEXT': format = 'TRADITIONAL' elif not format and 'TREE' in self.connection.features.supported_explain_formats: # Use TREE by default (if supported) as it's more informative. format = 'TREE' analyze = options.pop('analyze', False) prefix = super().explain_query_prefix(format, **options) if analyze and self.connection.features.supports_explain_analyze: # MariaDB uses ANALYZE instead of EXPLAIN ANALYZE. prefix = 'ANALYZE' if self.connection.mysql_is_mariadb else prefix + ' ANALYZE' if format and not (analyze and not self.connection.mysql_is_mariadb): # Only MariaDB supports the analyze option with formats. prefix += ' FORMAT=%s' % format if self.connection.features.needs_explain_extended and not analyze and format is None: # ANALYZE, EXTENDED, and FORMAT are mutually exclusive options. prefix += ' EXTENDED' return prefix def regex_lookup(self, lookup_type): # REGEXP BINARY doesn't work correctly in MySQL 8+ and REGEXP_LIKE # doesn't exist in MySQL 5.6 or in MariaDB. 
if self.connection.mysql_version < (8, 0, 0) or self.connection.mysql_is_mariadb: if lookup_type == 'regex': return '%s REGEXP BINARY %s' return '%s REGEXP %s' match_option = 'c' if lookup_type == 'regex' else 'i' return "REGEXP_LIKE(%%s, %%s, '%s')" % match_option def insert_statement(self, ignore_conflicts=False): return 'INSERT IGNORE INTO' if ignore_conflicts else super().insert_statement(ignore_conflicts)
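# --- Editorial sketch (not part of the original source) -------------------
# A minimal demonstration of the SQL fragments the truncation methods above
# emit. connection=None suffices because the date-truncation paths never
# touch the connection. The doubled %% survives this stage on purpose: it is
# collapsed later, when the final query string goes through the driver's
# %-style parameter interpolation.
if __name__ == '__main__':
    ops = DatabaseOperations(connection=None)
    assert ops.date_trunc_sql('month', 'created') == (
        "CAST(DATE_FORMAT(created, '%%Y-%%m-01') AS DATE)"
    )
    # Truncating to 'week' rewinds to Monday instead of using DATE_FORMAT.
    assert ops.date_trunc_sql('week', 'created') == (
        'DATE_SUB(created, INTERVAL WEEKDAY(created) DAY)'
    )
    # Unknown granularities fall through to a plain DATE() cast.
    assert ops.date_trunc_sql('day', 'created') == 'DATE(created)'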
# django/db/backends/postgresql/operations.py
from psycopg2.extras import Inet from django.conf import settings from django.db import NotSupportedError from django.db.backends.base.operations import BaseDatabaseOperations class DatabaseOperations(BaseDatabaseOperations): cast_char_field_without_max_length = 'varchar' explain_prefix = 'EXPLAIN' cast_data_types = { 'AutoField': 'integer', 'BigAutoField': 'bigint', 'SmallAutoField': 'smallint', } def unification_cast_sql(self, output_field): internal_type = output_field.get_internal_type() if internal_type in ("GenericIPAddressField", "IPAddressField", "TimeField", "UUIDField"): # PostgreSQL will resolve a union as type 'text' if input types are # 'unknown'. # https://www.postgresql.org/docs/current/typeconv-union-case.html # These fields cannot be implicitly cast back in the default # PostgreSQL configuration so we need to explicitly cast them. # We must also remove components of the type within brackets: # varchar(255) -> varchar. return 'CAST(%%s AS %s)' % output_field.db_type(self.connection).split('(')[0] return '%s' def date_extract_sql(self, lookup_type, field_name): # https://www.postgresql.org/docs/current/functions-datetime.html#FUNCTIONS-DATETIME-EXTRACT if lookup_type == 'week_day': # For consistency across backends, we return Sunday=1, Saturday=7. return "EXTRACT('dow' FROM %s) + 1" % field_name elif lookup_type == 'iso_week_day': return "EXTRACT('isodow' FROM %s)" % field_name elif lookup_type == 'iso_year': return "EXTRACT('isoyear' FROM %s)" % field_name else: return "EXTRACT('%s' FROM %s)" % (lookup_type, field_name) def date_trunc_sql(self, lookup_type, field_name): # https://www.postgresql.org/docs/current/functions-datetime.html#FUNCTIONS-DATETIME-TRUNC return "DATE_TRUNC('%s', %s)" % (lookup_type, field_name) def _prepare_tzname_delta(self, tzname): if '+' in tzname: return tzname.replace('+', '-') elif '-' in tzname: return tzname.replace('-', '+') return tzname def _convert_field_to_tz(self, field_name, tzname): if settings.USE_TZ: field_name = "%s AT TIME ZONE '%s'" % (field_name, self._prepare_tzname_delta(tzname)) return field_name def datetime_cast_date_sql(self, field_name, tzname): field_name = self._convert_field_to_tz(field_name, tzname) return '(%s)::date' % field_name def datetime_cast_time_sql(self, field_name, tzname): field_name = self._convert_field_to_tz(field_name, tzname) return '(%s)::time' % field_name def datetime_extract_sql(self, lookup_type, field_name, tzname): field_name = self._convert_field_to_tz(field_name, tzname) return self.date_extract_sql(lookup_type, field_name) def datetime_trunc_sql(self, lookup_type, field_name, tzname): field_name = self._convert_field_to_tz(field_name, tzname) # https://www.postgresql.org/docs/current/functions-datetime.html#FUNCTIONS-DATETIME-TRUNC return "DATE_TRUNC('%s', %s)" % (lookup_type, field_name) def time_trunc_sql(self, lookup_type, field_name): return "DATE_TRUNC('%s', %s)::time" % (lookup_type, field_name) def deferrable_sql(self): return " DEFERRABLE INITIALLY DEFERRED" def fetch_returned_insert_rows(self, cursor): """ Given a cursor object that has just performed an INSERT...RETURNING statement into a table, return the tuple of returned data. 
""" return cursor.fetchall() def lookup_cast(self, lookup_type, internal_type=None): lookup = '%s' # Cast text lookups to text to allow things like filter(x__contains=4) if lookup_type in ('iexact', 'contains', 'icontains', 'startswith', 'istartswith', 'endswith', 'iendswith', 'regex', 'iregex'): if internal_type in ('IPAddressField', 'GenericIPAddressField'): lookup = "HOST(%s)" elif internal_type in ('CICharField', 'CIEmailField', 'CITextField'): lookup = '%s::citext' else: lookup = "%s::text" # Use UPPER(x) for case-insensitive lookups; it's faster. if lookup_type in ('iexact', 'icontains', 'istartswith', 'iendswith'): lookup = 'UPPER(%s)' % lookup return lookup def no_limit_value(self): return None def prepare_sql_script(self, sql): return [sql] def quote_name(self, name): if name.startswith('"') and name.endswith('"'): return name # Quoting once is enough. return '"%s"' % name def set_time_zone_sql(self): return "SET TIME ZONE %s" def sql_flush(self, style, tables, sequences, allow_cascade=False): if tables: # Perform a single SQL 'TRUNCATE x, y, z...;' statement. It allows # us to truncate tables referenced by a foreign key in any other # table. tables_sql = ', '.join( style.SQL_FIELD(self.quote_name(table)) for table in tables) if allow_cascade: sql = ['%s %s %s;' % ( style.SQL_KEYWORD('TRUNCATE'), tables_sql, style.SQL_KEYWORD('CASCADE'), )] else: sql = ['%s %s;' % ( style.SQL_KEYWORD('TRUNCATE'), tables_sql, )] sql.extend(self.sequence_reset_by_name_sql(style, sequences)) return sql else: return [] def sequence_reset_by_name_sql(self, style, sequences): # 'ALTER SEQUENCE sequence_name RESTART WITH 1;'... style SQL statements # to reset sequence indices sql = [] for sequence_info in sequences: table_name = sequence_info['table'] # 'id' will be the case if it's an m2m using an autogenerated # intermediate table (see BaseDatabaseIntrospection.sequence_list). column_name = sequence_info['column'] or 'id' sql.append("%s setval(pg_get_serial_sequence('%s','%s'), 1, false);" % ( style.SQL_KEYWORD('SELECT'), style.SQL_TABLE(self.quote_name(table_name)), style.SQL_FIELD(column_name), )) return sql def tablespace_sql(self, tablespace, inline=False): if inline: return "USING INDEX TABLESPACE %s" % self.quote_name(tablespace) else: return "TABLESPACE %s" % self.quote_name(tablespace) def sequence_reset_sql(self, style, model_list): from django.db import models output = [] qn = self.quote_name for model in model_list: # Use `coalesce` to set the sequence for each model to the max pk value if there are records, # or 1 if there are none. Set the `is_called` property (the third argument to `setval`) to true # if there are records (as the max pk value is already in use), otherwise set it to false. # Use pg_get_serial_sequence to get the underlying sequence name from the table name # and column name (available since PostgreSQL 8) for f in model._meta.local_fields: if isinstance(f, models.AutoField): output.append( "%s setval(pg_get_serial_sequence('%s','%s'), " "coalesce(max(%s), 1), max(%s) %s null) %s %s;" % ( style.SQL_KEYWORD('SELECT'), style.SQL_TABLE(qn(model._meta.db_table)), style.SQL_FIELD(f.column), style.SQL_FIELD(qn(f.column)), style.SQL_FIELD(qn(f.column)), style.SQL_KEYWORD('IS NOT'), style.SQL_KEYWORD('FROM'), style.SQL_TABLE(qn(model._meta.db_table)), ) ) break # Only one AutoField is allowed per model, so don't bother continuing. 
for f in model._meta.many_to_many: if not f.remote_field.through: output.append( "%s setval(pg_get_serial_sequence('%s','%s'), " "coalesce(max(%s), 1), max(%s) %s null) %s %s;" % ( style.SQL_KEYWORD('SELECT'), style.SQL_TABLE(qn(f.m2m_db_table())), style.SQL_FIELD('id'), style.SQL_FIELD(qn('id')), style.SQL_FIELD(qn('id')), style.SQL_KEYWORD('IS NOT'), style.SQL_KEYWORD('FROM'), style.SQL_TABLE(qn(f.m2m_db_table())) ) ) return output def prep_for_iexact_query(self, x): return x def max_name_length(self): """ Return the maximum length of an identifier. The maximum length of an identifier is 63 by default, but can be changed by recompiling PostgreSQL after editing the NAMEDATALEN macro in src/include/pg_config_manual.h. This implementation returns 63, but can be overridden by a custom database backend that inherits most of its behavior from this one. """ return 63 def distinct_sql(self, fields, params): if fields: params = [param for param_list in params for param in param_list] return (['DISTINCT ON (%s)' % ', '.join(fields)], params) else: return ['DISTINCT'], [] def last_executed_query(self, cursor, sql, params): # http://initd.org/psycopg/docs/cursor.html#cursor.query # The query attribute is a Psycopg extension to the DB API 2.0. if cursor.query is not None: return cursor.query.decode() return None def return_insert_columns(self, fields): if not fields: return '', () columns = [ '%s.%s' % ( self.quote_name(field.model._meta.db_table), self.quote_name(field.column), ) for field in fields ] return 'RETURNING %s' % ', '.join(columns), () def bulk_insert_sql(self, fields, placeholder_rows): placeholder_rows_sql = (", ".join(row) for row in placeholder_rows) values_sql = ", ".join("(%s)" % sql for sql in placeholder_rows_sql) return "VALUES " + values_sql def adapt_datefield_value(self, value): return value def adapt_datetimefield_value(self, value): return value def adapt_timefield_value(self, value): return value def adapt_ipaddressfield_value(self, value): if value: return Inet(value) return None def subtract_temporals(self, internal_type, lhs, rhs): if internal_type == 'DateField': lhs_sql, lhs_params = lhs rhs_sql, rhs_params = rhs params = (*lhs_params, *rhs_params) return "(interval '1 day' * (%s - %s))" % (lhs_sql, rhs_sql), params return super().subtract_temporals(internal_type, lhs, rhs) def window_frame_range_start_end(self, start=None, end=None): start_, end_ = super().window_frame_range_start_end(start, end) if (start and start < 0) or (end and end > 0): raise NotSupportedError( 'PostgreSQL only supports UNBOUNDED together with PRECEDING ' 'and FOLLOWING.' ) return start_, end_ def explain_query_prefix(self, format=None, **options): prefix = super().explain_query_prefix(format) extra = {} if format: extra['FORMAT'] = format if options: extra.update({ name.upper(): 'true' if value else 'false' for name, value in options.items() }) if extra: prefix += ' (%s)' % ', '.join('%s %s' % i for i in extra.items()) return prefix def ignore_conflicts_suffix_sql(self, ignore_conflicts=None): return 'ON CONFLICT DO NOTHING' if ignore_conflicts else super().ignore_conflicts_suffix_sql(ignore_conflicts)
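# --- Editorial sketch (not part of the original source) -------------------
# A minimal demonstration of the offset trick in _prepare_tzname_delta()
# above: for fixed numeric offsets, PostgreSQL's AT TIME ZONE follows the
# POSIX sign convention, which is inverted relative to ISO 8601, so '+05:30'
# must be sent as '-05:30' and vice versa. Named zones pass through
# unchanged. Running this requires psycopg2, which this module imports.
if __name__ == '__main__':
    ops = DatabaseOperations(connection=None)
    assert ops._prepare_tzname_delta('+05:30') == '-05:30'
    assert ops._prepare_tzname_delta('-03:00') == '+03:00'
    assert ops._prepare_tzname_delta('Europe/Paris') == 'Europe/Paris'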
# django/db/backends/sqlite3/operations.py
import datetime import decimal import uuid from functools import lru_cache from itertools import chain from django.conf import settings from django.core.exceptions import FieldError from django.db import utils from django.db.backends.base.operations import BaseDatabaseOperations from django.db.models import aggregates, fields from django.db.models.expressions import Col from django.utils import timezone from django.utils.dateparse import parse_date, parse_datetime, parse_time from django.utils.duration import duration_microseconds from django.utils.functional import cached_property class DatabaseOperations(BaseDatabaseOperations): cast_char_field_without_max_length = 'text' cast_data_types = { 'DateField': 'TEXT', 'DateTimeField': 'TEXT', } explain_prefix = 'EXPLAIN QUERY PLAN' def bulk_batch_size(self, fields, objs): """ SQLite has a compile-time default (SQLITE_LIMIT_VARIABLE_NUMBER) of 999 variables per query. If there's only a single field to insert, the limit is 500 (SQLITE_MAX_COMPOUND_SELECT). """ if len(fields) == 1: return 500 elif len(fields) > 1: return self.connection.features.max_query_params // len(fields) else: return len(objs) def check_expression_support(self, expression): bad_fields = (fields.DateField, fields.DateTimeField, fields.TimeField) bad_aggregates = (aggregates.Sum, aggregates.Avg, aggregates.Variance, aggregates.StdDev) if isinstance(expression, bad_aggregates): for expr in expression.get_source_expressions(): try: output_field = expr.output_field except (AttributeError, FieldError): # Not every subexpression has an output_field which is fine # to ignore. pass else: if isinstance(output_field, bad_fields): raise utils.NotSupportedError( 'You cannot use Sum, Avg, StdDev, and Variance ' 'aggregations on date/time fields in sqlite3 ' 'since date/time is saved as text.' ) if isinstance(expression, aggregates.Aggregate) and len(expression.source_expressions) > 1: raise utils.NotSupportedError( "SQLite doesn't support DISTINCT on aggregate functions " "accepting multiple arguments." ) def date_extract_sql(self, lookup_type, field_name): """ Support EXTRACT with a user-defined function django_date_extract() that's registered in connect(). Use single quotes because this is a string and could otherwise cause a collision with a field name. 
""" return "django_date_extract('%s', %s)" % (lookup_type.lower(), field_name) def date_interval_sql(self, timedelta): return str(duration_microseconds(timedelta)) def format_for_duration_arithmetic(self, sql): """Do nothing since formatting is handled in the custom function.""" return sql def date_trunc_sql(self, lookup_type, field_name): return "django_date_trunc('%s', %s)" % (lookup_type.lower(), field_name) def time_trunc_sql(self, lookup_type, field_name): return "django_time_trunc('%s', %s)" % (lookup_type.lower(), field_name) def _convert_tznames_to_sql(self, tzname): if settings.USE_TZ: return "'%s'" % tzname, "'%s'" % self.connection.timezone_name return 'NULL', 'NULL' def datetime_cast_date_sql(self, field_name, tzname): return 'django_datetime_cast_date(%s, %s, %s)' % ( field_name, *self._convert_tznames_to_sql(tzname), ) def datetime_cast_time_sql(self, field_name, tzname): return 'django_datetime_cast_time(%s, %s, %s)' % ( field_name, *self._convert_tznames_to_sql(tzname), ) def datetime_extract_sql(self, lookup_type, field_name, tzname): return "django_datetime_extract('%s', %s, %s, %s)" % ( lookup_type.lower(), field_name, *self._convert_tznames_to_sql(tzname), ) def datetime_trunc_sql(self, lookup_type, field_name, tzname): return "django_datetime_trunc('%s', %s, %s, %s)" % ( lookup_type.lower(), field_name, *self._convert_tznames_to_sql(tzname), ) def time_extract_sql(self, lookup_type, field_name): return "django_time_extract('%s', %s)" % (lookup_type.lower(), field_name) def pk_default_value(self): return "NULL" def _quote_params_for_last_executed_query(self, params): """ Only for last_executed_query! Don't use this to execute SQL queries! """ # This function is limited both by SQLITE_LIMIT_VARIABLE_NUMBER (the # number of parameters, default = 999) and SQLITE_MAX_COLUMN (the # number of return values, default = 2000). Since Python's sqlite3 # module doesn't expose the get_limit() C API, assume the default # limits are in effect and split the work in batches if needed. BATCH_SIZE = 999 if len(params) > BATCH_SIZE: results = () for index in range(0, len(params), BATCH_SIZE): chunk = params[index:index + BATCH_SIZE] results += self._quote_params_for_last_executed_query(chunk) return results sql = 'SELECT ' + ', '.join(['QUOTE(?)'] * len(params)) # Bypass Django's wrappers and use the underlying sqlite3 connection # to avoid logging this query - it would trigger infinite recursion. cursor = self.connection.connection.cursor() # Native sqlite3 cursors cannot be used as context managers. try: return cursor.execute(sql, params).fetchone() finally: cursor.close() def last_executed_query(self, cursor, sql, params): # Python substitutes parameters in Modules/_sqlite/cursor.c with: # pysqlite_statement_bind_parameters(self->statement, parameters, allow_8bit_chars); # Unfortunately there is no way to reach self->statement from Python, # so we quote and substitute parameters manually. if params: if isinstance(params, (list, tuple)): params = self._quote_params_for_last_executed_query(params) else: values = tuple(params.values()) values = self._quote_params_for_last_executed_query(values) params = dict(zip(params, values)) return sql % params # For consistency with SQLiteCursorWrapper.execute(), just return sql # when there are no parameters. See #13648 and #17158. else: return sql def quote_name(self, name): if name.startswith('"') and name.endswith('"'): return name # Quoting once is enough. 
return '"%s"' % name def no_limit_value(self): return -1 def __references_graph(self, table_name): query = """ WITH tables AS ( SELECT %s name UNION SELECT sqlite_master.name FROM sqlite_master JOIN tables ON (sql REGEXP %s || tables.name || %s) ) SELECT name FROM tables; """ params = ( table_name, r'(?i)\s+references\s+("|\')?', r'("|\')?\s*\(', ) with self.connection.cursor() as cursor: results = cursor.execute(query, params) return [row[0] for row in results.fetchall()] @cached_property def _references_graph(self): # 512 is large enough to fit the ~330 tables (as of this writing) in # Django's test suite. return lru_cache(maxsize=512)(self.__references_graph) def sql_flush(self, style, tables, sequences, allow_cascade=False): if tables and allow_cascade: # Simulate TRUNCATE CASCADE by recursively collecting the tables # referencing the tables to be flushed. tables = set(chain.from_iterable(self._references_graph(table) for table in tables)) # Note: No requirement for reset of auto-incremented indices (cf. other # sql_flush() implementations). Just return SQL at this point return ['%s %s %s;' % ( style.SQL_KEYWORD('DELETE'), style.SQL_KEYWORD('FROM'), style.SQL_FIELD(self.quote_name(table)) ) for table in tables] def adapt_datetimefield_value(self, value): if value is None: return None # Expression values are adapted by the database. if hasattr(value, 'resolve_expression'): return value # SQLite doesn't support tz-aware datetimes if timezone.is_aware(value): if settings.USE_TZ: value = timezone.make_naive(value, self.connection.timezone) else: raise ValueError("SQLite backend does not support timezone-aware datetimes when USE_TZ is False.") return str(value) def adapt_timefield_value(self, value): if value is None: return None # Expression values are adapted by the database. if hasattr(value, 'resolve_expression'): return value # SQLite doesn't support tz-aware datetimes if timezone.is_aware(value): raise ValueError("SQLite backend does not support timezone-aware times.") return str(value) def get_db_converters(self, expression): converters = super().get_db_converters(expression) internal_type = expression.output_field.get_internal_type() if internal_type == 'DateTimeField': converters.append(self.convert_datetimefield_value) elif internal_type == 'DateField': converters.append(self.convert_datefield_value) elif internal_type == 'TimeField': converters.append(self.convert_timefield_value) elif internal_type == 'DecimalField': converters.append(self.get_decimalfield_converter(expression)) elif internal_type == 'UUIDField': converters.append(self.convert_uuidfield_value) elif internal_type in ('NullBooleanField', 'BooleanField'): converters.append(self.convert_booleanfield_value) return converters def convert_datetimefield_value(self, value, expression, connection): if value is not None: if not isinstance(value, datetime.datetime): value = parse_datetime(value) if settings.USE_TZ and not timezone.is_aware(value): value = timezone.make_aware(value, self.connection.timezone) return value def convert_datefield_value(self, value, expression, connection): if value is not None: if not isinstance(value, datetime.date): value = parse_date(value) return value def convert_timefield_value(self, value, expression, connection): if value is not None: if not isinstance(value, datetime.time): value = parse_time(value) return value def get_decimalfield_converter(self, expression): # SQLite stores only 15 significant digits. Digits coming from # float inaccuracy must be removed. 
create_decimal = decimal.Context(prec=15).create_decimal_from_float if isinstance(expression, Col): quantize_value = decimal.Decimal(1).scaleb(-expression.output_field.decimal_places) def converter(value, expression, connection): if value is not None: return create_decimal(value).quantize(quantize_value, context=expression.output_field.context) else: def converter(value, expression, connection): if value is not None: return create_decimal(value) return converter def convert_uuidfield_value(self, value, expression, connection): if value is not None: value = uuid.UUID(value) return value def convert_booleanfield_value(self, value, expression, connection): return bool(value) if value in (1, 0) else value def bulk_insert_sql(self, fields, placeholder_rows): return " UNION ALL ".join( "SELECT %s" % ", ".join(row) for row in placeholder_rows ) def combine_expression(self, connector, sub_expressions): # SQLite doesn't have a ^ operator, so use the user-defined POWER # function that's registered in connect(). if connector == '^': return 'POWER(%s)' % ','.join(sub_expressions) return super().combine_expression(connector, sub_expressions) def combine_duration_expression(self, connector, sub_expressions): if connector not in ['+', '-']: raise utils.DatabaseError('Invalid connector for timedelta: %s.' % connector) fn_params = ["'%s'" % connector] + sub_expressions if len(fn_params) > 3: raise ValueError('Too many params for timedelta operations.') return "django_format_dtdelta(%s)" % ', '.join(fn_params) def integer_field_range(self, internal_type): # SQLite doesn't enforce any integer constraints return (None, None) def subtract_temporals(self, internal_type, lhs, rhs): lhs_sql, lhs_params = lhs rhs_sql, rhs_params = rhs params = (*lhs_params, *rhs_params) if internal_type == 'TimeField': return 'django_time_diff(%s, %s)' % (lhs_sql, rhs_sql), params return 'django_timestamp_diff(%s, %s)' % (lhs_sql, rhs_sql), params def insert_statement(self, ignore_conflicts=False): return 'INSERT OR IGNORE INTO' if ignore_conflicts else super().insert_statement(ignore_conflicts)
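# --- Editorial sketch (not part of the original source) -------------------
# A minimal demonstration of the compound-SELECT form bulk_insert_sql()
# builds above: SQLite versions before 3.7.11 have no multi-row VALUES
# syntax, so each row becomes a SELECT chained with UNION ALL. The '^'
# connector is likewise rewritten to the user-defined POWER() function.
# connection=None suffices since both methods are pure string manipulation.
if __name__ == '__main__':
    ops = DatabaseOperations(connection=None)
    sql = ops.bulk_insert_sql(
        fields=None,  # unused by this implementation
        placeholder_rows=[['%s', '%s'], ['%s', '%s']],
    )
    assert sql == 'SELECT %s, %s UNION ALL SELECT %s, %s'
    assert ops.combine_expression('^', ['"price"', '2']) == 'POWER("price",2)'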
# django/core/checks/translation.py
from django.conf import settings from django.utils.translation import get_supported_language_variant from django.utils.translation.trans_real import language_code_re from . import Error, Tags, register E001 = Error( 'You have provided an invalid value for the LANGUAGE_CODE setting: {!r}.', id='translation.E001', ) E002 = Error( 'You have provided an invalid language code in the LANGUAGES setting: {!r}.', id='translation.E002', ) E003 = Error( 'You have provided an invalid language code in the LANGUAGES_BIDI setting: {!r}.', id='translation.E003', ) E004 = Error( 'You have provided a value for the LANGUAGE_CODE setting that is not in ' 'the LANGUAGES setting.', id='translation.E004', ) @register(Tags.translation) def check_setting_language_code(app_configs, **kwargs): """Error if LANGUAGE_CODE setting is invalid.""" tag = settings.LANGUAGE_CODE if not isinstance(tag, str) or not language_code_re.match(tag): return [Error(E001.msg.format(tag), id=E001.id)] return [] @register(Tags.translation) def check_setting_languages(app_configs, **kwargs): """Error if LANGUAGES setting is invalid.""" return [ Error(E002.msg.format(tag), id=E002.id) for tag, _ in settings.LANGUAGES if not isinstance(tag, str) or not language_code_re.match(tag) ] @register(Tags.translation) def check_setting_languages_bidi(app_configs, **kwargs): """Error if LANGUAGES_BIDI setting is invalid.""" return [ Error(E003.msg.format(tag), id=E003.id) for tag in settings.LANGUAGES_BIDI if not isinstance(tag, str) or not language_code_re.match(tag) ] @register(Tags.translation) def check_language_settings_consistent(app_configs, **kwargs): """Error if language settings are not consistent with each other.""" try: get_supported_language_variant(settings.LANGUAGE_CODE) except LookupError: return [E004] else: return []
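# --- Editorial sketch (not part of the original source) -------------------
# A minimal standalone demonstration of the E001 check above. The
# settings.configure() call is only here so the sketch runs outside a
# project (invoke with `python -m django.core.checks.translation` so the
# relative imports resolve). 'en_US' fails language_code_re because BCP 47
# language tags use hyphens, not underscores.
if __name__ == '__main__':
    settings.configure(LANGUAGE_CODE='en_US')
    assert [e.id for e in check_setting_language_code(None)] == ['translation.E001']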
# django/contrib/admin/__init__.py
from django.contrib.admin.decorators import register from django.contrib.admin.filters import ( AllValuesFieldListFilter, BooleanFieldListFilter, ChoicesFieldListFilter, DateFieldListFilter, EmptyFieldListFilter, FieldListFilter, ListFilter, RelatedFieldListFilter, RelatedOnlyFieldListFilter, SimpleListFilter, ) from django.contrib.admin.options import ( HORIZONTAL, VERTICAL, ModelAdmin, StackedInline, TabularInline, ) from django.contrib.admin.sites import AdminSite, site from django.utils.module_loading import autodiscover_modules __all__ = [ "register", "ModelAdmin", "HORIZONTAL", "VERTICAL", "StackedInline", "TabularInline", "AdminSite", "site", "ListFilter", "SimpleListFilter", "FieldListFilter", "BooleanFieldListFilter", "RelatedFieldListFilter", "ChoicesFieldListFilter", "DateFieldListFilter", "AllValuesFieldListFilter", "EmptyFieldListFilter", "RelatedOnlyFieldListFilter", "autodiscover", ] def autodiscover(): autodiscover_modules('admin', register_to=site) default_app_config = 'django.contrib.admin.apps.AdminConfig'
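# --- Editorial sketch (not part of the original source) -------------------
# Typical use of the names re-exported above, as written in a project's
# admin.py; `Question` and `QuestionAdmin` are illustrative names, not part
# of this module:
#
#     from django.contrib import admin
#     from .models import Question
#
#     @admin.register(Question)
#     class QuestionAdmin(admin.ModelAdmin):
#         list_display = ['question_text']
#         radio_fields = {'status': admin.HORIZONTAL}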
# django/contrib/admin/checks.py
import collections from itertools import chain from django.apps import apps from django.conf import settings from django.contrib.admin.utils import ( NotRelationField, flatten, get_fields_from_path, ) from django.core import checks from django.core.exceptions import FieldDoesNotExist from django.db import models from django.db.models.constants import LOOKUP_SEP from django.db.models.expressions import Combinable, F, OrderBy from django.forms.models import ( BaseModelForm, BaseModelFormSet, _get_foreign_key, ) from django.template import engines from django.template.backends.django import DjangoTemplates from django.utils.module_loading import import_string def _issubclass(cls, classinfo): """ issubclass() variant that doesn't raise an exception if cls isn't a class. """ try: return issubclass(cls, classinfo) except TypeError: return False def _contains_subclass(class_path, candidate_paths): """ Return whether or not a dotted class path (or a subclass of that class) is found in a list of candidate paths. """ cls = import_string(class_path) for path in candidate_paths: try: candidate_cls = import_string(path) except ImportError: # ImportErrors are raised elsewhere. continue if _issubclass(candidate_cls, cls): return True return False def check_admin_app(app_configs, **kwargs): from django.contrib.admin.sites import all_sites errors = [] for site in all_sites: errors.extend(site.check(app_configs)) return errors def check_dependencies(**kwargs): """ Check that the admin's dependencies are correctly installed. """ if not apps.is_installed('django.contrib.admin'): return [] errors = [] app_dependencies = ( ('django.contrib.contenttypes', 401), ('django.contrib.auth', 405), ('django.contrib.messages', 406), ) for app_name, error_code in app_dependencies: if not apps.is_installed(app_name): errors.append(checks.Error( "'%s' must be in INSTALLED_APPS in order to use the admin " "application." 
% app_name, id='admin.E%d' % error_code, )) for engine in engines.all(): if isinstance(engine, DjangoTemplates): django_templates_instance = engine.engine break else: django_templates_instance = None if not django_templates_instance: errors.append(checks.Error( "A 'django.template.backends.django.DjangoTemplates' instance " "must be configured in TEMPLATES in order to use the admin " "application.", id='admin.E403', )) else: if ('django.contrib.auth.context_processors.auth' not in django_templates_instance.context_processors and _contains_subclass('django.contrib.auth.backends.ModelBackend', settings.AUTHENTICATION_BACKENDS)): errors.append(checks.Error( "'django.contrib.auth.context_processors.auth' must be " "enabled in DjangoTemplates (TEMPLATES) if using the default " "auth backend in order to use the admin application.", id='admin.E402', )) if ('django.contrib.messages.context_processors.messages' not in django_templates_instance.context_processors): errors.append(checks.Error( "'django.contrib.messages.context_processors.messages' must " "be enabled in DjangoTemplates (TEMPLATES) in order to use " "the admin application.", id='admin.E404', )) if not _contains_subclass('django.contrib.auth.middleware.AuthenticationMiddleware', settings.MIDDLEWARE): errors.append(checks.Error( "'django.contrib.auth.middleware.AuthenticationMiddleware' must " "be in MIDDLEWARE in order to use the admin application.", id='admin.E408', )) if not _contains_subclass('django.contrib.messages.middleware.MessageMiddleware', settings.MIDDLEWARE): errors.append(checks.Error( "'django.contrib.messages.middleware.MessageMiddleware' must " "be in MIDDLEWARE in order to use the admin application.", id='admin.E409', )) if not _contains_subclass('django.contrib.sessions.middleware.SessionMiddleware', settings.MIDDLEWARE): errors.append(checks.Error( "'django.contrib.sessions.middleware.SessionMiddleware' must " "be in MIDDLEWARE in order to use the admin application.", id='admin.E410', )) return errors class BaseModelAdminChecks: def check(self, admin_obj, **kwargs): return [ *self._check_autocomplete_fields(admin_obj), *self._check_raw_id_fields(admin_obj), *self._check_fields(admin_obj), *self._check_fieldsets(admin_obj), *self._check_exclude(admin_obj), *self._check_form(admin_obj), *self._check_filter_vertical(admin_obj), *self._check_filter_horizontal(admin_obj), *self._check_radio_fields(admin_obj), *self._check_prepopulated_fields(admin_obj), *self._check_view_on_site_url(admin_obj), *self._check_ordering(admin_obj), *self._check_readonly_fields(admin_obj), ] def _check_autocomplete_fields(self, obj): """ Check that `autocomplete_fields` is a list or tuple of model fields. """ if not isinstance(obj.autocomplete_fields, (list, tuple)): return must_be('a list or tuple', option='autocomplete_fields', obj=obj, id='admin.E036') else: return list(chain.from_iterable([ self._check_autocomplete_fields_item(obj, field_name, 'autocomplete_fields[%d]' % index) for index, field_name in enumerate(obj.autocomplete_fields) ])) def _check_autocomplete_fields_item(self, obj, field_name, label): """ Check that an item in `autocomplete_fields` is a ForeignKey or a ManyToManyField and that the item has a related ModelAdmin with search_fields defined. 
""" try: field = obj.model._meta.get_field(field_name) except FieldDoesNotExist: return refer_to_missing_field(field=field_name, option=label, obj=obj, id='admin.E037') else: if not field.many_to_many and not isinstance(field, models.ForeignKey): return must_be( 'a foreign key or a many-to-many field', option=label, obj=obj, id='admin.E038' ) related_admin = obj.admin_site._registry.get(field.remote_field.model) if related_admin is None: return [ checks.Error( 'An admin for model "%s" has to be registered ' 'to be referenced by %s.autocomplete_fields.' % ( field.remote_field.model.__name__, type(obj).__name__, ), obj=obj.__class__, id='admin.E039', ) ] elif not related_admin.search_fields: return [ checks.Error( '%s must define "search_fields", because it\'s ' 'referenced by %s.autocomplete_fields.' % ( related_admin.__class__.__name__, type(obj).__name__, ), obj=obj.__class__, id='admin.E040', ) ] return [] def _check_raw_id_fields(self, obj): """ Check that `raw_id_fields` only contains field names that are listed on the model. """ if not isinstance(obj.raw_id_fields, (list, tuple)): return must_be('a list or tuple', option='raw_id_fields', obj=obj, id='admin.E001') else: return list(chain.from_iterable( self._check_raw_id_fields_item(obj, field_name, 'raw_id_fields[%d]' % index) for index, field_name in enumerate(obj.raw_id_fields) )) def _check_raw_id_fields_item(self, obj, field_name, label): """ Check an item of `raw_id_fields`, i.e. check that field named `field_name` exists in model `model` and is a ForeignKey or a ManyToManyField. """ try: field = obj.model._meta.get_field(field_name) except FieldDoesNotExist: return refer_to_missing_field(field=field_name, option=label, obj=obj, id='admin.E002') else: if not field.many_to_many and not isinstance(field, models.ForeignKey): return must_be('a foreign key or a many-to-many field', option=label, obj=obj, id='admin.E003') else: return [] def _check_fields(self, obj): """ Check that `fields` only refer to existing fields, doesn't contain duplicates. Check if at most one of `fields` and `fieldsets` is defined. """ if obj.fields is None: return [] elif not isinstance(obj.fields, (list, tuple)): return must_be('a list or tuple', option='fields', obj=obj, id='admin.E004') elif obj.fieldsets: return [ checks.Error( "Both 'fieldsets' and 'fields' are specified.", obj=obj.__class__, id='admin.E005', ) ] fields = flatten(obj.fields) if len(fields) != len(set(fields)): return [ checks.Error( "The value of 'fields' contains duplicate field(s).", obj=obj.__class__, id='admin.E006', ) ] return list(chain.from_iterable( self._check_field_spec(obj, field_name, 'fields') for field_name in obj.fields )) def _check_fieldsets(self, obj): """ Check that fieldsets is properly formatted and doesn't contain duplicates. """ if obj.fieldsets is None: return [] elif not isinstance(obj.fieldsets, (list, tuple)): return must_be('a list or tuple', option='fieldsets', obj=obj, id='admin.E007') else: seen_fields = [] return list(chain.from_iterable( self._check_fieldsets_item(obj, fieldset, 'fieldsets[%d]' % index, seen_fields) for index, fieldset in enumerate(obj.fieldsets) )) def _check_fieldsets_item(self, obj, fieldset, label, seen_fields): """ Check an item of `fieldsets`, i.e. check that this is a pair of a set name and a dictionary containing "fields" key. 
""" if not isinstance(fieldset, (list, tuple)): return must_be('a list or tuple', option=label, obj=obj, id='admin.E008') elif len(fieldset) != 2: return must_be('of length 2', option=label, obj=obj, id='admin.E009') elif not isinstance(fieldset[1], dict): return must_be('a dictionary', option='%s[1]' % label, obj=obj, id='admin.E010') elif 'fields' not in fieldset[1]: return [ checks.Error( "The value of '%s[1]' must contain the key 'fields'." % label, obj=obj.__class__, id='admin.E011', ) ] elif not isinstance(fieldset[1]['fields'], (list, tuple)): return must_be('a list or tuple', option="%s[1]['fields']" % label, obj=obj, id='admin.E008') seen_fields.extend(flatten(fieldset[1]['fields'])) if len(seen_fields) != len(set(seen_fields)): return [ checks.Error( "There are duplicate field(s) in '%s[1]'." % label, obj=obj.__class__, id='admin.E012', ) ] return list(chain.from_iterable( self._check_field_spec(obj, fieldset_fields, '%s[1]["fields"]' % label) for fieldset_fields in fieldset[1]['fields'] )) def _check_field_spec(self, obj, fields, label): """ `fields` should be an item of `fields` or an item of fieldset[1]['fields'] for any `fieldset` in `fieldsets`. It should be a field name or a tuple of field names. """ if isinstance(fields, tuple): return list(chain.from_iterable( self._check_field_spec_item(obj, field_name, "%s[%d]" % (label, index)) for index, field_name in enumerate(fields) )) else: return self._check_field_spec_item(obj, fields, label) def _check_field_spec_item(self, obj, field_name, label): if field_name in obj.readonly_fields: # Stuff can be put in fields that isn't actually a model field if # it's in readonly_fields, readonly_fields will handle the # validation of such things. return [] else: try: field = obj.model._meta.get_field(field_name) except FieldDoesNotExist: # If we can't find a field on the model that matches, it could # be an extra field on the form. return [] else: if (isinstance(field, models.ManyToManyField) and not field.remote_field.through._meta.auto_created): return [ checks.Error( "The value of '%s' cannot include the ManyToManyField '%s', " "because that field manually specifies a relationship model." % (label, field_name), obj=obj.__class__, id='admin.E013', ) ] else: return [] def _check_exclude(self, obj): """ Check that exclude is a sequence without duplicates. """ if obj.exclude is None: # default value is None return [] elif not isinstance(obj.exclude, (list, tuple)): return must_be('a list or tuple', option='exclude', obj=obj, id='admin.E014') elif len(obj.exclude) > len(set(obj.exclude)): return [ checks.Error( "The value of 'exclude' contains duplicate field(s).", obj=obj.__class__, id='admin.E015', ) ] else: return [] def _check_form(self, obj): """ Check that form subclasses BaseModelForm. """ if not _issubclass(obj.form, BaseModelForm): return must_inherit_from(parent='BaseModelForm', option='form', obj=obj, id='admin.E016') else: return [] def _check_filter_vertical(self, obj): """ Check that filter_vertical is a sequence of field names. """ if not isinstance(obj.filter_vertical, (list, tuple)): return must_be('a list or tuple', option='filter_vertical', obj=obj, id='admin.E017') else: return list(chain.from_iterable( self._check_filter_item(obj, field_name, "filter_vertical[%d]" % index) for index, field_name in enumerate(obj.filter_vertical) )) def _check_filter_horizontal(self, obj): """ Check that filter_horizontal is a sequence of field names. 
""" if not isinstance(obj.filter_horizontal, (list, tuple)): return must_be('a list or tuple', option='filter_horizontal', obj=obj, id='admin.E018') else: return list(chain.from_iterable( self._check_filter_item(obj, field_name, "filter_horizontal[%d]" % index) for index, field_name in enumerate(obj.filter_horizontal) )) def _check_filter_item(self, obj, field_name, label): """ Check one item of `filter_vertical` or `filter_horizontal`, i.e. check that given field exists and is a ManyToManyField. """ try: field = obj.model._meta.get_field(field_name) except FieldDoesNotExist: return refer_to_missing_field(field=field_name, option=label, obj=obj, id='admin.E019') else: if not field.many_to_many: return must_be('a many-to-many field', option=label, obj=obj, id='admin.E020') else: return [] def _check_radio_fields(self, obj): """ Check that `radio_fields` is a dictionary. """ if not isinstance(obj.radio_fields, dict): return must_be('a dictionary', option='radio_fields', obj=obj, id='admin.E021') else: return list(chain.from_iterable( self._check_radio_fields_key(obj, field_name, 'radio_fields') + self._check_radio_fields_value(obj, val, 'radio_fields["%s"]' % field_name) for field_name, val in obj.radio_fields.items() )) def _check_radio_fields_key(self, obj, field_name, label): """ Check that a key of `radio_fields` dictionary is name of existing field and that the field is a ForeignKey or has `choices` defined. """ try: field = obj.model._meta.get_field(field_name) except FieldDoesNotExist: return refer_to_missing_field(field=field_name, option=label, obj=obj, id='admin.E022') else: if not (isinstance(field, models.ForeignKey) or field.choices): return [ checks.Error( "The value of '%s' refers to '%s', which is not an " "instance of ForeignKey, and does not have a 'choices' definition." % ( label, field_name ), obj=obj.__class__, id='admin.E023', ) ] else: return [] def _check_radio_fields_value(self, obj, val, label): """ Check type of a value of `radio_fields` dictionary. """ from django.contrib.admin.options import HORIZONTAL, VERTICAL if val not in (HORIZONTAL, VERTICAL): return [ checks.Error( "The value of '%s' must be either admin.HORIZONTAL or admin.VERTICAL." % label, obj=obj.__class__, id='admin.E024', ) ] else: return [] def _check_view_on_site_url(self, obj): if not callable(obj.view_on_site) and not isinstance(obj.view_on_site, bool): return [ checks.Error( "The value of 'view_on_site' must be a callable or a boolean value.", obj=obj.__class__, id='admin.E025', ) ] else: return [] def _check_prepopulated_fields(self, obj): """ Check that `prepopulated_fields` is a dictionary containing allowed field types. """ if not isinstance(obj.prepopulated_fields, dict): return must_be('a dictionary', option='prepopulated_fields', obj=obj, id='admin.E026') else: return list(chain.from_iterable( self._check_prepopulated_fields_key(obj, field_name, 'prepopulated_fields') + self._check_prepopulated_fields_value(obj, val, 'prepopulated_fields["%s"]' % field_name) for field_name, val in obj.prepopulated_fields.items() )) def _check_prepopulated_fields_key(self, obj, field_name, label): """ Check a key of `prepopulated_fields` dictionary, i.e. check that it is a name of existing field and the field is one of the allowed types. 
""" try: field = obj.model._meta.get_field(field_name) except FieldDoesNotExist: return refer_to_missing_field(field=field_name, option=label, obj=obj, id='admin.E027') else: if isinstance(field, (models.DateTimeField, models.ForeignKey, models.ManyToManyField)): return [ checks.Error( "The value of '%s' refers to '%s', which must not be a DateTimeField, " "a ForeignKey, a OneToOneField, or a ManyToManyField." % (label, field_name), obj=obj.__class__, id='admin.E028', ) ] else: return [] def _check_prepopulated_fields_value(self, obj, val, label): """ Check a value of `prepopulated_fields` dictionary, i.e. it's an iterable of existing fields. """ if not isinstance(val, (list, tuple)): return must_be('a list or tuple', option=label, obj=obj, id='admin.E029') else: return list(chain.from_iterable( self._check_prepopulated_fields_value_item(obj, subfield_name, "%s[%r]" % (label, index)) for index, subfield_name in enumerate(val) )) def _check_prepopulated_fields_value_item(self, obj, field_name, label): """ For `prepopulated_fields` equal to {"slug": ("title",)}, `field_name` is "title". """ try: obj.model._meta.get_field(field_name) except FieldDoesNotExist: return refer_to_missing_field(field=field_name, option=label, obj=obj, id='admin.E030') else: return [] def _check_ordering(self, obj): """ Check that ordering refers to existing fields or is random. """ # ordering = None if obj.ordering is None: # The default value is None return [] elif not isinstance(obj.ordering, (list, tuple)): return must_be('a list or tuple', option='ordering', obj=obj, id='admin.E031') else: return list(chain.from_iterable( self._check_ordering_item(obj, field_name, 'ordering[%d]' % index) for index, field_name in enumerate(obj.ordering) )) def _check_ordering_item(self, obj, field_name, label): """ Check that `ordering` refers to existing fields. """ if isinstance(field_name, (Combinable, OrderBy)): if not isinstance(field_name, OrderBy): field_name = field_name.asc() if isinstance(field_name.expression, F): field_name = field_name.expression.name else: return [] if field_name == '?' and len(obj.ordering) != 1: return [ checks.Error( "The value of 'ordering' has the random ordering marker '?', " "but contains other fields as well.", hint='Either remove the "?", or remove the other fields.', obj=obj.__class__, id='admin.E032', ) ] elif field_name == '?': return [] elif LOOKUP_SEP in field_name: # Skip ordering in the format field1__field2 (FIXME: checking # this format would be nice, but it's a little fiddly). return [] else: if field_name.startswith('-'): field_name = field_name[1:] if field_name == 'pk': return [] try: obj.model._meta.get_field(field_name) except FieldDoesNotExist: return refer_to_missing_field(field=field_name, option=label, obj=obj, id='admin.E033') else: return [] def _check_readonly_fields(self, obj): """ Check that readonly_fields refers to proper attribute or field. 
""" if obj.readonly_fields == (): return [] elif not isinstance(obj.readonly_fields, (list, tuple)): return must_be('a list or tuple', option='readonly_fields', obj=obj, id='admin.E034') else: return list(chain.from_iterable( self._check_readonly_fields_item(obj, field_name, "readonly_fields[%d]" % index) for index, field_name in enumerate(obj.readonly_fields) )) def _check_readonly_fields_item(self, obj, field_name, label): if callable(field_name): return [] elif hasattr(obj, field_name): return [] elif hasattr(obj.model, field_name): return [] else: try: obj.model._meta.get_field(field_name) except FieldDoesNotExist: return [ checks.Error( "The value of '%s' is not a callable, an attribute of '%s', or an attribute of '%s.%s'." % ( label, obj.__class__.__name__, obj.model._meta.app_label, obj.model._meta.object_name ), obj=obj.__class__, id='admin.E035', ) ] else: return [] class ModelAdminChecks(BaseModelAdminChecks): def check(self, admin_obj, **kwargs): return [ *super().check(admin_obj), *self._check_save_as(admin_obj), *self._check_save_on_top(admin_obj), *self._check_inlines(admin_obj), *self._check_list_display(admin_obj), *self._check_list_display_links(admin_obj), *self._check_list_filter(admin_obj), *self._check_list_select_related(admin_obj), *self._check_list_per_page(admin_obj), *self._check_list_max_show_all(admin_obj), *self._check_list_editable(admin_obj), *self._check_search_fields(admin_obj), *self._check_date_hierarchy(admin_obj), *self._check_action_permission_methods(admin_obj), *self._check_actions_uniqueness(admin_obj), ] def _check_save_as(self, obj): """ Check save_as is a boolean. """ if not isinstance(obj.save_as, bool): return must_be('a boolean', option='save_as', obj=obj, id='admin.E101') else: return [] def _check_save_on_top(self, obj): """ Check save_on_top is a boolean. """ if not isinstance(obj.save_on_top, bool): return must_be('a boolean', option='save_on_top', obj=obj, id='admin.E102') else: return [] def _check_inlines(self, obj): """ Check all inline model admin classes. """ if not isinstance(obj.inlines, (list, tuple)): return must_be('a list or tuple', option='inlines', obj=obj, id='admin.E103') else: return list(chain.from_iterable( self._check_inlines_item(obj, item, "inlines[%d]" % index) for index, item in enumerate(obj.inlines) )) def _check_inlines_item(self, obj, inline, label): """ Check one inline model admin. """ try: inline_label = inline.__module__ + '.' + inline.__name__ except AttributeError: return [ checks.Error( "'%s' must inherit from 'InlineModelAdmin'." % obj, obj=obj.__class__, id='admin.E104', ) ] from django.contrib.admin.options import InlineModelAdmin if not _issubclass(inline, InlineModelAdmin): return [ checks.Error( "'%s' must inherit from 'InlineModelAdmin'." % inline_label, obj=obj.__class__, id='admin.E104', ) ] elif not inline.model: return [ checks.Error( "'%s' must have a 'model' attribute." % inline_label, obj=obj.__class__, id='admin.E105', ) ] elif not _issubclass(inline.model, models.Model): return must_be('a Model', option='%s.model' % inline_label, obj=obj, id='admin.E106') else: return inline(obj.model, obj.admin_site).check() def _check_list_display(self, obj): """ Check that list_display only contains fields or usable attributes. 
""" if not isinstance(obj.list_display, (list, tuple)): return must_be('a list or tuple', option='list_display', obj=obj, id='admin.E107') else: return list(chain.from_iterable( self._check_list_display_item(obj, item, "list_display[%d]" % index) for index, item in enumerate(obj.list_display) )) def _check_list_display_item(self, obj, item, label): if callable(item): return [] elif hasattr(obj, item): return [] try: field = obj.model._meta.get_field(item) except FieldDoesNotExist: try: field = getattr(obj.model, item) except AttributeError: return [ checks.Error( "The value of '%s' refers to '%s', which is not a " "callable, an attribute of '%s', or an attribute or " "method on '%s.%s'." % ( label, item, obj.__class__.__name__, obj.model._meta.app_label, obj.model._meta.object_name, ), obj=obj.__class__, id='admin.E108', ) ] if isinstance(field, models.ManyToManyField): return [ checks.Error( "The value of '%s' must not be a ManyToManyField." % label, obj=obj.__class__, id='admin.E109', ) ] return [] def _check_list_display_links(self, obj): """ Check that list_display_links is a unique subset of list_display. """ from django.contrib.admin.options import ModelAdmin if obj.list_display_links is None: return [] elif not isinstance(obj.list_display_links, (list, tuple)): return must_be('a list, a tuple, or None', option='list_display_links', obj=obj, id='admin.E110') # Check only if ModelAdmin.get_list_display() isn't overridden. elif obj.get_list_display.__func__ is ModelAdmin.get_list_display: return list(chain.from_iterable( self._check_list_display_links_item(obj, field_name, "list_display_links[%d]" % index) for index, field_name in enumerate(obj.list_display_links) )) return [] def _check_list_display_links_item(self, obj, field_name, label): if field_name not in obj.list_display: return [ checks.Error( "The value of '%s' refers to '%s', which is not defined in 'list_display'." % ( label, field_name ), obj=obj.__class__, id='admin.E111', ) ] else: return [] def _check_list_filter(self, obj): if not isinstance(obj.list_filter, (list, tuple)): return must_be('a list or tuple', option='list_filter', obj=obj, id='admin.E112') else: return list(chain.from_iterable( self._check_list_filter_item(obj, item, "list_filter[%d]" % index) for index, item in enumerate(obj.list_filter) )) def _check_list_filter_item(self, obj, item, label): """ Check one item of `list_filter`, i.e. check if it is one of three options: 1. 'field' -- a basic field filter, possibly w/ relationships (e.g. 'field__rel') 2. ('field', SomeFieldListFilter) - a field-based list filter class 3. SomeListFilter - a non-field list filter class """ from django.contrib.admin import ListFilter, FieldListFilter if callable(item) and not isinstance(item, models.Field): # If item is option 3, it should be a ListFilter... if not _issubclass(item, ListFilter): return must_inherit_from(parent='ListFilter', option=label, obj=obj, id='admin.E113') # ... but not a FieldListFilter. elif issubclass(item, FieldListFilter): return [ checks.Error( "The value of '%s' must not inherit from 'FieldListFilter'." 
% label, obj=obj.__class__, id='admin.E114', ) ] else: return [] elif isinstance(item, (tuple, list)): # item is option #2 field, list_filter_class = item if not _issubclass(list_filter_class, FieldListFilter): return must_inherit_from(parent='FieldListFilter', option='%s[1]' % label, obj=obj, id='admin.E115') else: return [] else: # item is option #1 field = item # Validate the field string try: get_fields_from_path(obj.model, field) except (NotRelationField, FieldDoesNotExist): return [ checks.Error( "The value of '%s' refers to '%s', which does not refer to a Field." % (label, field), obj=obj.__class__, id='admin.E116', ) ] else: return [] def _check_list_select_related(self, obj): """ Check that list_select_related is a boolean, a list or a tuple. """ if not isinstance(obj.list_select_related, (bool, list, tuple)): return must_be('a boolean, tuple or list', option='list_select_related', obj=obj, id='admin.E117') else: return [] def _check_list_per_page(self, obj): """ Check that list_per_page is an integer. """ if not isinstance(obj.list_per_page, int): return must_be('an integer', option='list_per_page', obj=obj, id='admin.E118') else: return [] def _check_list_max_show_all(self, obj): """ Check that list_max_show_all is an integer. """ if not isinstance(obj.list_max_show_all, int): return must_be('an integer', option='list_max_show_all', obj=obj, id='admin.E119') else: return [] def _check_list_editable(self, obj): """ Check that list_editable is a sequence of editable fields from list_display without first element. """ if not isinstance(obj.list_editable, (list, tuple)): return must_be('a list or tuple', option='list_editable', obj=obj, id='admin.E120') else: return list(chain.from_iterable( self._check_list_editable_item(obj, item, "list_editable[%d]" % index) for index, item in enumerate(obj.list_editable) )) def _check_list_editable_item(self, obj, field_name, label): try: field = obj.model._meta.get_field(field_name) except FieldDoesNotExist: return refer_to_missing_field(field=field_name, option=label, obj=obj, id='admin.E121') else: if field_name not in obj.list_display: return [ checks.Error( "The value of '%s' refers to '%s', which is not " "contained in 'list_display'." % (label, field_name), obj=obj.__class__, id='admin.E122', ) ] elif obj.list_display_links and field_name in obj.list_display_links: return [ checks.Error( "The value of '%s' cannot be in both 'list_editable' and 'list_display_links'." % field_name, obj=obj.__class__, id='admin.E123', ) ] # If list_display[0] is in list_editable, check that # list_display_links is set. See #22792 and #26229 for use cases. elif (obj.list_display[0] == field_name and not obj.list_display_links and obj.list_display_links is not None): return [ checks.Error( "The value of '%s' refers to the first field in 'list_display' ('%s'), " "which cannot be used unless 'list_display_links' is set." % ( label, obj.list_display[0] ), obj=obj.__class__, id='admin.E124', ) ] elif not field.editable: return [ checks.Error( "The value of '%s' refers to '%s', which is not editable through the admin." % ( label, field_name ), obj=obj.__class__, id='admin.E125', ) ] else: return [] def _check_search_fields(self, obj): """ Check search_fields is a sequence. """ if not isinstance(obj.search_fields, (list, tuple)): return must_be('a list or tuple', option='search_fields', obj=obj, id='admin.E126') else: return [] def _check_date_hierarchy(self, obj): """ Check that date_hierarchy refers to DateField or DateTimeField. 
""" if obj.date_hierarchy is None: return [] else: try: field = get_fields_from_path(obj.model, obj.date_hierarchy)[-1] except (NotRelationField, FieldDoesNotExist): return [ checks.Error( "The value of 'date_hierarchy' refers to '%s', which " "does not refer to a Field." % obj.date_hierarchy, obj=obj.__class__, id='admin.E127', ) ] else: if not isinstance(field, (models.DateField, models.DateTimeField)): return must_be('a DateField or DateTimeField', option='date_hierarchy', obj=obj, id='admin.E128') else: return [] def _check_action_permission_methods(self, obj): """ Actions with an allowed_permission attribute require the ModelAdmin to implement a has_<perm>_permission() method for each permission. """ actions = obj._get_base_actions() errors = [] for func, name, _ in actions: if not hasattr(func, 'allowed_permissions'): continue for permission in func.allowed_permissions: method_name = 'has_%s_permission' % permission if not hasattr(obj, method_name): errors.append( checks.Error( '%s must define a %s() method for the %s action.' % ( obj.__class__.__name__, method_name, func.__name__, ), obj=obj.__class__, id='admin.E129', ) ) return errors def _check_actions_uniqueness(self, obj): """Check that every action has a unique __name__.""" errors = [] names = collections.Counter(name for _, name, _ in obj._get_base_actions()) for name, count in names.items(): if count > 1: errors.append(checks.Error( '__name__ attributes of actions defined in %s must be ' 'unique. Name %r is not unique.' % ( obj.__class__.__name__, name, ), obj=obj.__class__, id='admin.E130', )) return errors class InlineModelAdminChecks(BaseModelAdminChecks): def check(self, inline_obj, **kwargs): parent_model = inline_obj.parent_model return [ *super().check(inline_obj), *self._check_relation(inline_obj, parent_model), *self._check_exclude_of_parent_model(inline_obj, parent_model), *self._check_extra(inline_obj), *self._check_max_num(inline_obj), *self._check_min_num(inline_obj), *self._check_formset(inline_obj), ] def _check_exclude_of_parent_model(self, obj, parent_model): # Do not perform more specific checks if the base checks result in an # error. errors = super()._check_exclude(obj) if errors: return [] # Skip if `fk_name` is invalid. if self._check_relation(obj, parent_model): return [] if obj.exclude is None: return [] fk = _get_foreign_key(parent_model, obj.model, fk_name=obj.fk_name) if fk.name in obj.exclude: return [ checks.Error( "Cannot exclude the field '%s', because it is the foreign key " "to the parent model '%s.%s'." % ( fk.name, parent_model._meta.app_label, parent_model._meta.object_name ), obj=obj.__class__, id='admin.E201', ) ] else: return [] def _check_relation(self, obj, parent_model): try: _get_foreign_key(parent_model, obj.model, fk_name=obj.fk_name) except ValueError as e: return [checks.Error(e.args[0], obj=obj.__class__, id='admin.E202')] else: return [] def _check_extra(self, obj): """ Check that extra is an integer. """ if not isinstance(obj.extra, int): return must_be('an integer', option='extra', obj=obj, id='admin.E203') else: return [] def _check_max_num(self, obj): """ Check that max_num is an integer. """ if obj.max_num is None: return [] elif not isinstance(obj.max_num, int): return must_be('an integer', option='max_num', obj=obj, id='admin.E204') else: return [] def _check_min_num(self, obj): """ Check that min_num is an integer. 
""" if obj.min_num is None: return [] elif not isinstance(obj.min_num, int): return must_be('an integer', option='min_num', obj=obj, id='admin.E205') else: return [] def _check_formset(self, obj): """ Check formset is a subclass of BaseModelFormSet. """ if not _issubclass(obj.formset, BaseModelFormSet): return must_inherit_from(parent='BaseModelFormSet', option='formset', obj=obj, id='admin.E206') else: return [] def must_be(type, option, obj, id): return [ checks.Error( "The value of '%s' must be %s." % (option, type), obj=obj.__class__, id=id, ), ] def must_inherit_from(parent, option, obj, id): return [ checks.Error( "The value of '%s' must inherit from '%s'." % (option, parent), obj=obj.__class__, id=id, ), ] def refer_to_missing_field(field, option, obj, id): return [ checks.Error( "The value of '%s' refers to '%s', which is not an attribute of '%s.%s'." % ( option, field, obj.model._meta.app_label, obj.model._meta.object_name ), obj=obj.__class__, id=id, ), ]
4fac2d13a127f6726b995557169aec0c432453bf82091c84bae666196edc18f1
""" This encapsulates the logic for displaying filters in the Django admin. Filters are specified in models with the "list_filter" option. Each filter subclass knows how to display a filter for a field that passes a certain test -- e.g. being a DateField or ForeignKey. """ import datetime from django.contrib.admin.options import IncorrectLookupParameters from django.contrib.admin.utils import ( get_model_from_relation, prepare_lookup_value, reverse_field_path, ) from django.core.exceptions import ImproperlyConfigured, ValidationError from django.db import models from django.utils import timezone from django.utils.translation import gettext_lazy as _ class ListFilter: title = None # Human-readable title to appear in the right sidebar. template = 'admin/filter.html' def __init__(self, request, params, model, model_admin): # This dictionary will eventually contain the request's query string # parameters actually used by this filter. self.used_parameters = {} if self.title is None: raise ImproperlyConfigured( "The list filter '%s' does not specify a 'title'." % self.__class__.__name__ ) def has_output(self): """ Return True if some choices would be output for this filter. """ raise NotImplementedError('subclasses of ListFilter must provide a has_output() method') def choices(self, changelist): """ Return choices ready to be output in the template. `changelist` is the ChangeList to be displayed. """ raise NotImplementedError('subclasses of ListFilter must provide a choices() method') def queryset(self, request, queryset): """ Return the filtered queryset. """ raise NotImplementedError('subclasses of ListFilter must provide a queryset() method') def expected_parameters(self): """ Return the list of parameter names that are expected from the request's query string and that will be used by this filter. """ raise NotImplementedError('subclasses of ListFilter must provide an expected_parameters() method') class SimpleListFilter(ListFilter): # The parameter that should be used in the query string for that filter. parameter_name = None def __init__(self, request, params, model, model_admin): super().__init__(request, params, model, model_admin) if self.parameter_name is None: raise ImproperlyConfigured( "The list filter '%s' does not specify a 'parameter_name'." % self.__class__.__name__ ) if self.parameter_name in params: value = params.pop(self.parameter_name) self.used_parameters[self.parameter_name] = value lookup_choices = self.lookups(request, model_admin) if lookup_choices is None: lookup_choices = () self.lookup_choices = list(lookup_choices) def has_output(self): return len(self.lookup_choices) > 0 def value(self): """ Return the value (in string format) provided in the request's query string for this filter, if any, or None if the value wasn't provided. """ return self.used_parameters.get(self.parameter_name) def lookups(self, request, model_admin): """ Must be overridden to return a list of tuples (value, verbose value) """ raise NotImplementedError( 'The SimpleListFilter.lookups() method must be overridden to ' 'return a list of tuples (value, verbose value).' 
) def expected_parameters(self): return [self.parameter_name] def choices(self, changelist): yield { 'selected': self.value() is None, 'query_string': changelist.get_query_string(remove=[self.parameter_name]), 'display': _('All'), } for lookup, title in self.lookup_choices: yield { 'selected': self.value() == str(lookup), 'query_string': changelist.get_query_string({self.parameter_name: lookup}), 'display': title, } class FieldListFilter(ListFilter): _field_list_filters = [] _take_priority_index = 0 def __init__(self, field, request, params, model, model_admin, field_path): self.field = field self.field_path = field_path self.title = getattr(field, 'verbose_name', field_path) super().__init__(request, params, model, model_admin) for p in self.expected_parameters(): if p in params: value = params.pop(p) self.used_parameters[p] = prepare_lookup_value(p, value) def has_output(self): return True def queryset(self, request, queryset): try: return queryset.filter(**self.used_parameters) except (ValueError, ValidationError) as e: # Fields may raise a ValueError or ValidationError when converting # the parameters to the correct type. raise IncorrectLookupParameters(e) @classmethod def register(cls, test, list_filter_class, take_priority=False): if take_priority: # This is to allow overriding the default filters for certain types # of fields with some custom filters. The first found in the list # is used in priority. cls._field_list_filters.insert( cls._take_priority_index, (test, list_filter_class)) cls._take_priority_index += 1 else: cls._field_list_filters.append((test, list_filter_class)) @classmethod def create(cls, field, request, params, model, model_admin, field_path): for test, list_filter_class in cls._field_list_filters: if test(field): return list_filter_class(field, request, params, model, model_admin, field_path=field_path) class RelatedFieldListFilter(FieldListFilter): def __init__(self, field, request, params, model, model_admin, field_path): other_model = get_model_from_relation(field) self.lookup_kwarg = '%s__%s__exact' % (field_path, field.target_field.name) self.lookup_kwarg_isnull = '%s__isnull' % field_path self.lookup_val = params.get(self.lookup_kwarg) self.lookup_val_isnull = params.get(self.lookup_kwarg_isnull) super().__init__(field, request, params, model, model_admin, field_path) self.lookup_choices = self.field_choices(field, request, model_admin) if hasattr(field, 'verbose_name'): self.lookup_title = field.verbose_name else: self.lookup_title = other_model._meta.verbose_name self.title = self.lookup_title self.empty_value_display = model_admin.get_empty_value_display() @property def include_empty_choice(self): """ Return True if a "(None)" choice should be included, which filters out everything except empty relationships. """ return self.field.null or (self.field.is_relation and self.field.many_to_many) def has_output(self): if self.include_empty_choice: extra = 1 else: extra = 0 return len(self.lookup_choices) + extra > 1 def expected_parameters(self): return [self.lookup_kwarg, self.lookup_kwarg_isnull] def field_admin_ordering(self, field, request, model_admin): """ Return the model admin's ordering for related field, if provided. 
""" related_admin = model_admin.admin_site._registry.get(field.remote_field.model) if related_admin is not None: return related_admin.get_ordering(request) return () def field_choices(self, field, request, model_admin): ordering = self.field_admin_ordering(field, request, model_admin) return field.get_choices(include_blank=False, ordering=ordering) def choices(self, changelist): yield { 'selected': self.lookup_val is None and not self.lookup_val_isnull, 'query_string': changelist.get_query_string(remove=[self.lookup_kwarg, self.lookup_kwarg_isnull]), 'display': _('All'), } for pk_val, val in self.lookup_choices: yield { 'selected': self.lookup_val == str(pk_val), 'query_string': changelist.get_query_string({self.lookup_kwarg: pk_val}, [self.lookup_kwarg_isnull]), 'display': val, } if self.include_empty_choice: yield { 'selected': bool(self.lookup_val_isnull), 'query_string': changelist.get_query_string({self.lookup_kwarg_isnull: 'True'}, [self.lookup_kwarg]), 'display': self.empty_value_display, } FieldListFilter.register(lambda f: f.remote_field, RelatedFieldListFilter) class BooleanFieldListFilter(FieldListFilter): def __init__(self, field, request, params, model, model_admin, field_path): self.lookup_kwarg = '%s__exact' % field_path self.lookup_kwarg2 = '%s__isnull' % field_path self.lookup_val = params.get(self.lookup_kwarg) self.lookup_val2 = params.get(self.lookup_kwarg2) super().__init__(field, request, params, model, model_admin, field_path) if (self.used_parameters and self.lookup_kwarg in self.used_parameters and self.used_parameters[self.lookup_kwarg] in ('1', '0')): self.used_parameters[self.lookup_kwarg] = bool(int(self.used_parameters[self.lookup_kwarg])) def expected_parameters(self): return [self.lookup_kwarg, self.lookup_kwarg2] def choices(self, changelist): for lookup, title in ( (None, _('All')), ('1', _('Yes')), ('0', _('No'))): yield { 'selected': self.lookup_val == lookup and not self.lookup_val2, 'query_string': changelist.get_query_string({self.lookup_kwarg: lookup}, [self.lookup_kwarg2]), 'display': title, } if self.field.null: yield { 'selected': self.lookup_val2 == 'True', 'query_string': changelist.get_query_string({self.lookup_kwarg2: 'True'}, [self.lookup_kwarg]), 'display': _('Unknown'), } FieldListFilter.register(lambda f: isinstance(f, models.BooleanField), BooleanFieldListFilter) class ChoicesFieldListFilter(FieldListFilter): def __init__(self, field, request, params, model, model_admin, field_path): self.lookup_kwarg = '%s__exact' % field_path self.lookup_kwarg_isnull = '%s__isnull' % field_path self.lookup_val = params.get(self.lookup_kwarg) self.lookup_val_isnull = params.get(self.lookup_kwarg_isnull) super().__init__(field, request, params, model, model_admin, field_path) def expected_parameters(self): return [self.lookup_kwarg, self.lookup_kwarg_isnull] def choices(self, changelist): yield { 'selected': self.lookup_val is None, 'query_string': changelist.get_query_string(remove=[self.lookup_kwarg, self.lookup_kwarg_isnull]), 'display': _('All') } none_title = '' for lookup, title in self.field.flatchoices: if lookup is None: none_title = title continue yield { 'selected': str(lookup) == self.lookup_val, 'query_string': changelist.get_query_string({self.lookup_kwarg: lookup}, [self.lookup_kwarg_isnull]), 'display': title, } if none_title: yield { 'selected': bool(self.lookup_val_isnull), 'query_string': changelist.get_query_string({self.lookup_kwarg_isnull: 'True'}, [self.lookup_kwarg]), 'display': none_title, } FieldListFilter.register(lambda f: 
bool(f.choices), ChoicesFieldListFilter) class DateFieldListFilter(FieldListFilter): def __init__(self, field, request, params, model, model_admin, field_path): self.field_generic = '%s__' % field_path self.date_params = {k: v for k, v in params.items() if k.startswith(self.field_generic)} now = timezone.now() # When time zone support is enabled, convert "now" to the user's time # zone so Django's definition of "Today" matches what the user expects. if timezone.is_aware(now): now = timezone.localtime(now) if isinstance(field, models.DateTimeField): today = now.replace(hour=0, minute=0, second=0, microsecond=0) else: # field is a models.DateField today = now.date() tomorrow = today + datetime.timedelta(days=1) if today.month == 12: next_month = today.replace(year=today.year + 1, month=1, day=1) else: next_month = today.replace(month=today.month + 1, day=1) next_year = today.replace(year=today.year + 1, month=1, day=1) self.lookup_kwarg_since = '%s__gte' % field_path self.lookup_kwarg_until = '%s__lt' % field_path self.links = ( (_('Any date'), {}), (_('Today'), { self.lookup_kwarg_since: str(today), self.lookup_kwarg_until: str(tomorrow), }), (_('Past 7 days'), { self.lookup_kwarg_since: str(today - datetime.timedelta(days=7)), self.lookup_kwarg_until: str(tomorrow), }), (_('This month'), { self.lookup_kwarg_since: str(today.replace(day=1)), self.lookup_kwarg_until: str(next_month), }), (_('This year'), { self.lookup_kwarg_since: str(today.replace(month=1, day=1)), self.lookup_kwarg_until: str(next_year), }), ) if field.null: self.lookup_kwarg_isnull = '%s__isnull' % field_path self.links += ( (_('No date'), {self.field_generic + 'isnull': 'True'}), (_('Has date'), {self.field_generic + 'isnull': 'False'}), ) super().__init__(field, request, params, model, model_admin, field_path) def expected_parameters(self): params = [self.lookup_kwarg_since, self.lookup_kwarg_until] if self.field.null: params.append(self.lookup_kwarg_isnull) return params def choices(self, changelist): for title, param_dict in self.links: yield { 'selected': self.date_params == param_dict, 'query_string': changelist.get_query_string(param_dict, [self.field_generic]), 'display': title, } FieldListFilter.register( lambda f: isinstance(f, models.DateField), DateFieldListFilter) # This should be registered last, because it's a last resort. For example, # if a field is eligible to use the BooleanFieldListFilter, that'd be much # more appropriate, and the AllValuesFieldListFilter won't get used for it. 
class AllValuesFieldListFilter(FieldListFilter): def __init__(self, field, request, params, model, model_admin, field_path): self.lookup_kwarg = field_path self.lookup_kwarg_isnull = '%s__isnull' % field_path self.lookup_val = params.get(self.lookup_kwarg) self.lookup_val_isnull = params.get(self.lookup_kwarg_isnull) self.empty_value_display = model_admin.get_empty_value_display() parent_model, reverse_path = reverse_field_path(model, field_path) # Obey parent ModelAdmin queryset when deciding which options to show if model == parent_model: queryset = model_admin.get_queryset(request) else: queryset = parent_model._default_manager.all() self.lookup_choices = queryset.distinct().order_by(field.name).values_list(field.name, flat=True) super().__init__(field, request, params, model, model_admin, field_path) def expected_parameters(self): return [self.lookup_kwarg, self.lookup_kwarg_isnull] def choices(self, changelist): yield { 'selected': self.lookup_val is None and self.lookup_val_isnull is None, 'query_string': changelist.get_query_string(remove=[self.lookup_kwarg, self.lookup_kwarg_isnull]), 'display': _('All'), } include_none = False for val in self.lookup_choices: if val is None: include_none = True continue val = str(val) yield { 'selected': self.lookup_val == val, 'query_string': changelist.get_query_string({self.lookup_kwarg: val}, [self.lookup_kwarg_isnull]), 'display': val, } if include_none: yield { 'selected': bool(self.lookup_val_isnull), 'query_string': changelist.get_query_string({self.lookup_kwarg_isnull: 'True'}, [self.lookup_kwarg]), 'display': self.empty_value_display, } FieldListFilter.register(lambda f: True, AllValuesFieldListFilter) class RelatedOnlyFieldListFilter(RelatedFieldListFilter): def field_choices(self, field, request, model_admin): pk_qs = model_admin.get_queryset(request).distinct().values_list('%s__pk' % self.field_path, flat=True) ordering = self.field_admin_ordering(field, request, model_admin) return field.get_choices(include_blank=False, limit_choices_to={'pk__in': pk_qs}, ordering=ordering) class EmptyFieldListFilter(FieldListFilter): def __init__(self, field, request, params, model, model_admin, field_path): if not field.empty_strings_allowed and not field.null: raise ImproperlyConfigured( "The list filter '%s' cannot be used with field '%s' which " "doesn't allow empty strings and nulls." % ( self.__class__.__name__, field.name, ) ) self.lookup_kwarg = '%s__isempty' % field_path self.lookup_val = params.get(self.lookup_kwarg) super().__init__(field, request, params, model, model_admin, field_path) def queryset(self, request, queryset): if self.lookup_kwarg not in self.used_parameters: return queryset if self.lookup_val not in ('0', '1'): raise IncorrectLookupParameters lookup_condition = models.Q() if self.field.empty_strings_allowed: lookup_condition |= models.Q(**{self.field_path: ''}) if self.field.null: lookup_condition |= models.Q(**{'%s__isnull' % self.field_path: True}) if self.lookup_val == '1': return queryset.filter(lookup_condition) return queryset.exclude(lookup_condition) def expected_parameters(self): return [self.lookup_kwarg] def choices(self, changelist): for lookup, title in ( (None, _('All')), ('1', _('Empty')), ('0', _('Not empty')), ): yield { 'selected': self.lookup_val == lookup, 'query_string': changelist.get_query_string({self.lookup_kwarg: lookup}), 'display': title, }
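

# Illustrative sketch (not part of Django): a minimal custom filter using the
# SimpleListFilter contract documented above -- set ``title`` and
# ``parameter_name``, return (value, verbose value) pairs from lookups(), and
# filter in queryset() based on self.value(). The ``birthday`` field it
# filters on is hypothetical; real filters would live in your own app.
class DecadeBornListFilter(SimpleListFilter):
    # Human-readable title shown in the right sidebar.
    title = _('decade born')
    # Query string parameter, e.g. ?decade=80s.
    parameter_name = 'decade'

    def lookups(self, request, model_admin):
        return [
            ('80s', _('in the eighties')),
            ('90s', _('in the nineties')),
        ]

    def queryset(self, request, queryset):
        # self.value() is the raw query string value, or None for "All".
        if self.value() == '80s':
            return queryset.filter(birthday__year__gte=1980, birthday__year__lte=1989)
        if self.value() == '90s':
            return queryset.filter(birthday__year__gte=1990, birthday__year__lte=1999)
        return queryset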
b94eb67f8e16caab74a74feabc2c954c5a5da646ef3d99a6796252876b648a8a
from unittest import mock, skipUnless

from django.conf.global_settings import PASSWORD_HASHERS
from django.contrib.auth.hashers import (
    UNUSABLE_PASSWORD_PREFIX, UNUSABLE_PASSWORD_SUFFIX_LENGTH,
    BasePasswordHasher, PBKDF2PasswordHasher, PBKDF2SHA1PasswordHasher,
    check_password, get_hasher, identify_hasher, is_password_usable,
    make_password,
)
from django.test import SimpleTestCase
from django.test.utils import override_settings

try:
    import crypt
except ImportError:
    crypt = None
else:
    # On some platforms (e.g. OpenBSD), crypt.crypt() always returns None.
    if crypt.crypt('') is None:
        crypt = None

try:
    import bcrypt
except ImportError:
    bcrypt = None

try:
    import argon2
except ImportError:
    argon2 = None


class PBKDF2SingleIterationHasher(PBKDF2PasswordHasher):
    iterations = 1


@override_settings(PASSWORD_HASHERS=PASSWORD_HASHERS)
class TestUtilsHashPass(SimpleTestCase):

    def test_simple(self):
        encoded = make_password('lètmein')
        self.assertTrue(encoded.startswith('pbkdf2_sha256$'))
        self.assertTrue(is_password_usable(encoded))
        self.assertTrue(check_password('lètmein', encoded))
        self.assertFalse(check_password('lètmeinz', encoded))
        # Blank passwords
        blank_encoded = make_password('')
        self.assertTrue(blank_encoded.startswith('pbkdf2_sha256$'))
        self.assertTrue(is_password_usable(blank_encoded))
        self.assertTrue(check_password('', blank_encoded))
        self.assertFalse(check_password(' ', blank_encoded))

    def test_pbkdf2(self):
        encoded = make_password('lètmein', 'seasalt', 'pbkdf2_sha256')
        self.assertEqual(encoded, 'pbkdf2_sha256$216000$seasalt$youGZxOw6ZOcfrXv2i8/AhrnpZflJJ9EshS9XmUJTUg=')
        self.assertTrue(is_password_usable(encoded))
        self.assertTrue(check_password('lètmein', encoded))
        self.assertFalse(check_password('lètmeinz', encoded))
        self.assertEqual(identify_hasher(encoded).algorithm, "pbkdf2_sha256")
        # Blank passwords
        blank_encoded = make_password('', 'seasalt', 'pbkdf2_sha256')
        self.assertTrue(blank_encoded.startswith('pbkdf2_sha256$'))
        self.assertTrue(is_password_usable(blank_encoded))
        self.assertTrue(check_password('', blank_encoded))
        self.assertFalse(check_password(' ', blank_encoded))

    @override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'])
    def test_sha1(self):
        encoded = make_password('lètmein', 'seasalt', 'sha1')
        self.assertEqual(encoded, 'sha1$seasalt$cff36ea83f5706ce9aa7454e63e431fc726b2dc8')
        self.assertTrue(is_password_usable(encoded))
        self.assertTrue(check_password('lètmein', encoded))
        self.assertFalse(check_password('lètmeinz', encoded))
        self.assertEqual(identify_hasher(encoded).algorithm, "sha1")
        # Blank passwords
        blank_encoded = make_password('', 'seasalt', 'sha1')
        self.assertTrue(blank_encoded.startswith('sha1$'))
        self.assertTrue(is_password_usable(blank_encoded))
        self.assertTrue(check_password('', blank_encoded))
        self.assertFalse(check_password(' ', blank_encoded))

    @override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.MD5PasswordHasher'])
    def test_md5(self):
        encoded = make_password('lètmein', 'seasalt', 'md5')
        self.assertEqual(encoded, 'md5$seasalt$3f86d0d3d465b7b458c231bf3555c0e3')
        self.assertTrue(is_password_usable(encoded))
        self.assertTrue(check_password('lètmein', encoded))
        self.assertFalse(check_password('lètmeinz', encoded))
        self.assertEqual(identify_hasher(encoded).algorithm, "md5")
        # Blank passwords
        blank_encoded = make_password('', 'seasalt', 'md5')
        self.assertTrue(blank_encoded.startswith('md5$'))
        self.assertTrue(is_password_usable(blank_encoded))
        self.assertTrue(check_password('', blank_encoded))
        self.assertFalse(check_password(' ', blank_encoded))

    @override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.UnsaltedMD5PasswordHasher'])
    def test_unsalted_md5(self):
        encoded = make_password('lètmein', '', 'unsalted_md5')
        self.assertEqual(encoded, '88a434c88cca4e900f7874cd98123f43')
        self.assertTrue(is_password_usable(encoded))
        self.assertTrue(check_password('lètmein', encoded))
        self.assertFalse(check_password('lètmeinz', encoded))
        self.assertEqual(identify_hasher(encoded).algorithm, "unsalted_md5")
        # Alternate unsalted syntax
        alt_encoded = "md5$$%s" % encoded
        self.assertTrue(is_password_usable(alt_encoded))
        self.assertTrue(check_password('lètmein', alt_encoded))
        self.assertFalse(check_password('lètmeinz', alt_encoded))
        # Blank passwords
        blank_encoded = make_password('', '', 'unsalted_md5')
        self.assertTrue(is_password_usable(blank_encoded))
        self.assertTrue(check_password('', blank_encoded))
        self.assertFalse(check_password(' ', blank_encoded))

    @override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.UnsaltedSHA1PasswordHasher'])
    def test_unsalted_sha1(self):
        encoded = make_password('lètmein', '', 'unsalted_sha1')
        self.assertEqual(encoded, 'sha1$$6d138ca3ae545631b3abd71a4f076ce759c5700b')
        self.assertTrue(is_password_usable(encoded))
        self.assertTrue(check_password('lètmein', encoded))
        self.assertFalse(check_password('lètmeinz', encoded))
        self.assertEqual(identify_hasher(encoded).algorithm, "unsalted_sha1")
        # Raw SHA1 isn't acceptable
        alt_encoded = encoded[6:]
        self.assertFalse(check_password('lètmein', alt_encoded))
        # Blank passwords
        blank_encoded = make_password('', '', 'unsalted_sha1')
        self.assertTrue(blank_encoded.startswith('sha1$'))
        self.assertTrue(is_password_usable(blank_encoded))
        self.assertTrue(check_password('', blank_encoded))
        self.assertFalse(check_password(' ', blank_encoded))

    @skipUnless(crypt, "no crypt module to generate password.")
    @override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.CryptPasswordHasher'])
    def test_crypt(self):
        encoded = make_password('lètmei', 'ab', 'crypt')
        self.assertEqual(encoded, 'crypt$$ab1Hv2Lg7ltQo')
        self.assertTrue(is_password_usable(encoded))
        self.assertTrue(check_password('lètmei', encoded))
        self.assertFalse(check_password('lètmeiz', encoded))
        self.assertEqual(identify_hasher(encoded).algorithm, "crypt")
        # Blank passwords
        blank_encoded = make_password('', 'ab', 'crypt')
        self.assertTrue(blank_encoded.startswith('crypt$'))
        self.assertTrue(is_password_usable(blank_encoded))
        self.assertTrue(check_password('', blank_encoded))
        self.assertFalse(check_password(' ', blank_encoded))

    @skipUnless(bcrypt, "bcrypt not installed")
    def test_bcrypt_sha256(self):
        encoded = make_password('lètmein', hasher='bcrypt_sha256')
        self.assertTrue(is_password_usable(encoded))
        self.assertTrue(encoded.startswith('bcrypt_sha256$'))
        self.assertTrue(check_password('lètmein', encoded))
        self.assertFalse(check_password('lètmeinz', encoded))
        self.assertEqual(identify_hasher(encoded).algorithm, "bcrypt_sha256")

        # password truncation no longer works
        password = (
            'VSK0UYV6FFQVZ0KG88DYN9WADAADZO1CTSIVDJUNZSUML6IBX7LN7ZS3R5'
            'JGB3RGZ7VI7G7DJQ9NI8BQFSRPTG6UWTTVESA5ZPUN'
        )
        encoded = make_password(password, hasher='bcrypt_sha256')
        self.assertTrue(check_password(password, encoded))
        self.assertFalse(check_password(password[:72], encoded))
        # Blank passwords
        blank_encoded = make_password('', hasher='bcrypt_sha256')
        self.assertTrue(blank_encoded.startswith('bcrypt_sha256$'))
        self.assertTrue(is_password_usable(blank_encoded))
        self.assertTrue(check_password('', blank_encoded))
        self.assertFalse(check_password(' ', blank_encoded))

    @skipUnless(bcrypt, "bcrypt not installed")
    @override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.BCryptPasswordHasher'])
    def test_bcrypt(self):
        encoded = make_password('lètmein', hasher='bcrypt')
        self.assertTrue(is_password_usable(encoded))
        self.assertTrue(encoded.startswith('bcrypt$'))
        self.assertTrue(check_password('lètmein', encoded))
        self.assertFalse(check_password('lètmeinz', encoded))
        self.assertEqual(identify_hasher(encoded).algorithm, "bcrypt")
        # Blank passwords
        blank_encoded = make_password('', hasher='bcrypt')
        self.assertTrue(blank_encoded.startswith('bcrypt$'))
        self.assertTrue(is_password_usable(blank_encoded))
        self.assertTrue(check_password('', blank_encoded))
        self.assertFalse(check_password(' ', blank_encoded))

    @skipUnless(bcrypt, "bcrypt not installed")
    @override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.BCryptPasswordHasher'])
    def test_bcrypt_upgrade(self):
        hasher = get_hasher('bcrypt')
        self.assertEqual('bcrypt', hasher.algorithm)
        self.assertNotEqual(hasher.rounds, 4)

        old_rounds = hasher.rounds
        try:
            # Generate a password with 4 rounds.
            hasher.rounds = 4
            encoded = make_password('letmein', hasher='bcrypt')
            rounds = hasher.safe_summary(encoded)['work factor']
            self.assertEqual(rounds, '04')

            state = {'upgraded': False}

            def setter(password):
                state['upgraded'] = True

            # No upgrade is triggered.
            self.assertTrue(check_password('letmein', encoded, setter, 'bcrypt'))
            self.assertFalse(state['upgraded'])

            # Revert to the old rounds count and ...
            hasher.rounds = old_rounds

            # ... check if the password would get updated to the new count.
            self.assertTrue(check_password('letmein', encoded, setter, 'bcrypt'))
            self.assertTrue(state['upgraded'])
        finally:
            hasher.rounds = old_rounds

    @skipUnless(bcrypt, "bcrypt not installed")
    @override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.BCryptPasswordHasher'])
    def test_bcrypt_harden_runtime(self):
        hasher = get_hasher('bcrypt')
        self.assertEqual('bcrypt', hasher.algorithm)

        with mock.patch.object(hasher, 'rounds', 4):
            encoded = make_password('letmein', hasher='bcrypt')

        with mock.patch.object(hasher, 'rounds', 6), \
                mock.patch.object(hasher, 'encode', side_effect=hasher.encode):
            hasher.harden_runtime('wrong_password', encoded)

            # Increasing rounds from 4 to 6 means an increase of 4 in workload,
            # therefore hardening should run 3 times to make the timing the
            # same (the original encode() call already ran once).
            self.assertEqual(hasher.encode.call_count, 3)

            # Get the original salt (includes the original workload factor)
            algorithm, data = encoded.split('$', 1)
            expected_call = (('wrong_password', data[:29].encode()),)
            self.assertEqual(hasher.encode.call_args_list, [expected_call] * 3)

    def test_unusable(self):
        encoded = make_password(None)
        self.assertEqual(len(encoded), len(UNUSABLE_PASSWORD_PREFIX) + UNUSABLE_PASSWORD_SUFFIX_LENGTH)
        self.assertFalse(is_password_usable(encoded))
        self.assertFalse(check_password(None, encoded))
        self.assertFalse(check_password(encoded, encoded))
        self.assertFalse(check_password(UNUSABLE_PASSWORD_PREFIX, encoded))
        self.assertFalse(check_password('', encoded))
        self.assertFalse(check_password('lètmein', encoded))
        self.assertFalse(check_password('lètmeinz', encoded))
        with self.assertRaisesMessage(ValueError, 'Unknown password hashing algorithm'):
            identify_hasher(encoded)
        # Assert that the unusable passwords actually contain a random part.
        # This might fail one day due to a hash collision.
        self.assertNotEqual(encoded, make_password(None), "Random password collision?")

    def test_unspecified_password(self):
        """
        Makes sure specifying no plain password with a valid encoded password
        returns `False`.
        """
        self.assertFalse(check_password(None, make_password('lètmein')))

    def test_bad_algorithm(self):
        msg = (
            "Unknown password hashing algorithm '%s'. Did you specify it in "
            "the PASSWORD_HASHERS setting?"
        )
        with self.assertRaisesMessage(ValueError, msg % 'lolcat'):
            make_password('lètmein', hasher='lolcat')
        with self.assertRaisesMessage(ValueError, msg % 'lolcat'):
            identify_hasher('lolcat$salt$hash')

    def test_is_password_usable(self):
        passwords = ('lètmein_badencoded', '', None)
        for password in passwords:
            with self.subTest(password=password):
                self.assertIs(is_password_usable(password), True)

    def test_low_level_pbkdf2(self):
        hasher = PBKDF2PasswordHasher()
        encoded = hasher.encode('lètmein', 'seasalt2')
        self.assertEqual(encoded, 'pbkdf2_sha256$216000$seasalt2$gHyszNJ9lwTG5y3MQUjZe+OJmYVTBPl/y7bYq9dtk8M=')
        self.assertTrue(hasher.verify('lètmein', encoded))

    def test_low_level_pbkdf2_sha1(self):
        hasher = PBKDF2SHA1PasswordHasher()
        encoded = hasher.encode('lètmein', 'seasalt2')
        self.assertEqual(encoded, 'pbkdf2_sha1$216000$seasalt2$E1KH89wMKuPXrrQzifVcG4cBtiA=')
        self.assertTrue(hasher.verify('lètmein', encoded))

    @override_settings(
        PASSWORD_HASHERS=[
            'django.contrib.auth.hashers.PBKDF2PasswordHasher',
            'django.contrib.auth.hashers.SHA1PasswordHasher',
            'django.contrib.auth.hashers.MD5PasswordHasher',
        ],
    )
    def test_upgrade(self):
        self.assertEqual('pbkdf2_sha256', get_hasher('default').algorithm)
        for algo in ('sha1', 'md5'):
            with self.subTest(algo=algo):
                encoded = make_password('lètmein', hasher=algo)
                state = {'upgraded': False}

                def setter(password):
                    state['upgraded'] = True

                self.assertTrue(check_password('lètmein', encoded, setter))
                self.assertTrue(state['upgraded'])

    def test_no_upgrade(self):
        encoded = make_password('lètmein')
        state = {'upgraded': False}

        def setter():
            state['upgraded'] = True

        self.assertFalse(check_password('WRONG', encoded, setter))
        self.assertFalse(state['upgraded'])

    @override_settings(
        PASSWORD_HASHERS=[
            'django.contrib.auth.hashers.PBKDF2PasswordHasher',
            'django.contrib.auth.hashers.SHA1PasswordHasher',
            'django.contrib.auth.hashers.MD5PasswordHasher',
        ],
    )
    def test_no_upgrade_on_incorrect_pass(self):
        self.assertEqual('pbkdf2_sha256', get_hasher('default').algorithm)
        for algo in ('sha1', 'md5'):
            with self.subTest(algo=algo):
                encoded = make_password('lètmein', hasher=algo)
                state = {'upgraded': False}

                def setter():
                    state['upgraded'] = True

                self.assertFalse(check_password('WRONG', encoded, setter))
                self.assertFalse(state['upgraded'])

    def test_pbkdf2_upgrade(self):
        hasher = get_hasher('default')
        self.assertEqual('pbkdf2_sha256', hasher.algorithm)
        self.assertNotEqual(hasher.iterations, 1)

        old_iterations = hasher.iterations
        try:
            # Generate a password with 1 iteration.
            hasher.iterations = 1
            encoded = make_password('letmein')
            algo, iterations, salt, hash = encoded.split('$', 3)
            self.assertEqual(iterations, '1')

            state = {'upgraded': False}

            def setter(password):
                state['upgraded'] = True

            # No upgrade is triggered
            self.assertTrue(check_password('letmein', encoded, setter))
            self.assertFalse(state['upgraded'])

            # Revert to the old iteration count and ...
            hasher.iterations = old_iterations

            # ... check if the password would get updated to the new iteration count.
            self.assertTrue(check_password('letmein', encoded, setter))
            self.assertTrue(state['upgraded'])
        finally:
            hasher.iterations = old_iterations

    def test_pbkdf2_harden_runtime(self):
        hasher = get_hasher('default')
        self.assertEqual('pbkdf2_sha256', hasher.algorithm)

        with mock.patch.object(hasher, 'iterations', 1):
            encoded = make_password('letmein')

        with mock.patch.object(hasher, 'iterations', 6), \
                mock.patch.object(hasher, 'encode', side_effect=hasher.encode):
            hasher.harden_runtime('wrong_password', encoded)

            # Encode should get called once ...
            self.assertEqual(hasher.encode.call_count, 1)

            # ... with the original salt and 5 iterations.
            algorithm, iterations, salt, hash = encoded.split('$', 3)
            expected_call = (('wrong_password', salt, 5),)
            self.assertEqual(hasher.encode.call_args, expected_call)

    def test_pbkdf2_upgrade_new_hasher(self):
        hasher = get_hasher('default')
        self.assertEqual('pbkdf2_sha256', hasher.algorithm)
        self.assertNotEqual(hasher.iterations, 1)

        state = {'upgraded': False}

        def setter(password):
            state['upgraded'] = True

        with self.settings(PASSWORD_HASHERS=[
                'auth_tests.test_hashers.PBKDF2SingleIterationHasher']):
            encoded = make_password('letmein')
            algo, iterations, salt, hash = encoded.split('$', 3)
            self.assertEqual(iterations, '1')

            # No upgrade is triggered
            self.assertTrue(check_password('letmein', encoded, setter))
            self.assertFalse(state['upgraded'])

        # Revert to the old iteration count and check if the password would get
        # updated to the new iteration count.
        with self.settings(PASSWORD_HASHERS=[
                'django.contrib.auth.hashers.PBKDF2PasswordHasher',
                'auth_tests.test_hashers.PBKDF2SingleIterationHasher']):
            self.assertTrue(check_password('letmein', encoded, setter))
            self.assertTrue(state['upgraded'])

    def test_check_password_calls_harden_runtime(self):
        hasher = get_hasher('default')
        encoded = make_password('letmein')

        with mock.patch.object(hasher, 'harden_runtime'), \
                mock.patch.object(hasher, 'must_update', return_value=True):
            # Correct password supplied, no hardening needed
            check_password('letmein', encoded)
            self.assertEqual(hasher.harden_runtime.call_count, 0)

            # Wrong password supplied, hardening needed
            check_password('wrong_password', encoded)
            self.assertEqual(hasher.harden_runtime.call_count, 1)


class BasePasswordHasherTests(SimpleTestCase):
    not_implemented_msg = 'subclasses of BasePasswordHasher must provide %s() method'

    def setUp(self):
        self.hasher = BasePasswordHasher()

    def test_load_library_no_algorithm(self):
        msg = "Hasher 'BasePasswordHasher' doesn't specify a library attribute"
        with self.assertRaisesMessage(ValueError, msg):
            self.hasher._load_library()

    def test_load_library_importerror(self):
        PlainHasher = type('PlainHasher', (BasePasswordHasher,), {'algorithm': 'plain', 'library': 'plain'})
        msg = "Couldn't load 'PlainHasher' algorithm library: No module named 'plain'"
        with self.assertRaisesMessage(ValueError, msg):
            PlainHasher()._load_library()

    def test_attributes(self):
        self.assertIsNone(self.hasher.algorithm)
        self.assertIsNone(self.hasher.library)

    def test_encode(self):
        msg = self.not_implemented_msg % 'an encode'
        with self.assertRaisesMessage(NotImplementedError, msg):
            self.hasher.encode('password', 'salt')

    def test_harden_runtime(self):
        msg = 'subclasses of BasePasswordHasher should provide a harden_runtime() method'
        with self.assertWarns(Warning, msg=msg):
            self.hasher.harden_runtime('password', 'encoded')

    def test_must_update(self):
        self.assertIs(self.hasher.must_update('encoded'), False)

    def test_safe_summary(self):
        msg = self.not_implemented_msg % 'a safe_summary'
        with self.assertRaisesMessage(NotImplementedError, msg):
            self.hasher.safe_summary('encoded')

    def test_verify(self):
        msg = self.not_implemented_msg % 'a verify'
        with self.assertRaisesMessage(NotImplementedError, msg):
            self.hasher.verify('password', 'encoded')


@skipUnless(argon2, "argon2-cffi not installed")
@override_settings(PASSWORD_HASHERS=PASSWORD_HASHERS)
class TestUtilsHashPassArgon2(SimpleTestCase):

    def test_argon2(self):
        encoded = make_password('lètmein', hasher='argon2')
        self.assertTrue(is_password_usable(encoded))
        self.assertTrue(encoded.startswith('argon2$'))
        self.assertTrue(check_password('lètmein', encoded))
        self.assertFalse(check_password('lètmeinz', encoded))
        self.assertEqual(identify_hasher(encoded).algorithm, 'argon2')
        # Blank passwords
        blank_encoded = make_password('', hasher='argon2')
        self.assertTrue(blank_encoded.startswith('argon2$'))
        self.assertTrue(is_password_usable(blank_encoded))
        self.assertTrue(check_password('', blank_encoded))
        self.assertFalse(check_password(' ', blank_encoded))
        # Old hashes without version attribute
        encoded = (
            'argon2$argon2i$m=8,t=1,p=1$c29tZXNhbHQ$gwQOXSNhxiOxPOA0+PY10P9QFO'
            '4NAYysnqRt1GSQLE55m+2GYDt9FEjPMHhP2Cuf0nOEXXMocVrsJAtNSsKyfg'
        )
        self.assertTrue(check_password('secret', encoded))
        self.assertFalse(check_password('wrong', encoded))

    def test_argon2_upgrade(self):
        self._test_argon2_upgrade('time_cost', 'time cost', 1)
        self._test_argon2_upgrade('memory_cost', 'memory cost', 16)
        self._test_argon2_upgrade('parallelism', 'parallelism', 1)

    def test_argon2_version_upgrade(self):
        hasher = get_hasher('argon2')
        state = {'upgraded': False}
        encoded = (
            'argon2$argon2i$m=8,t=1,p=1$c29tZXNhbHQ$gwQOXSNhxiOxPOA0+PY10P9QFO'
            '4NAYysnqRt1GSQLE55m+2GYDt9FEjPMHhP2Cuf0nOEXXMocVrsJAtNSsKyfg'
        )

        def setter(password):
            state['upgraded'] = True

        old_m = hasher.memory_cost
        old_t = hasher.time_cost
        old_p = hasher.parallelism
        try:
            hasher.memory_cost = 8
            hasher.time_cost = 1
            hasher.parallelism = 1
            self.assertTrue(check_password('secret', encoded, setter, 'argon2'))
            self.assertTrue(state['upgraded'])
        finally:
            hasher.memory_cost = old_m
            hasher.time_cost = old_t
            hasher.parallelism = old_p

    def _test_argon2_upgrade(self, attr, summary_key, new_value):
        hasher = get_hasher('argon2')
        self.assertEqual('argon2', hasher.algorithm)
        self.assertNotEqual(getattr(hasher, attr), new_value)

        old_value = getattr(hasher, attr)
        try:
            # Generate hash with attr set to 1
            setattr(hasher, attr, new_value)
            encoded = make_password('letmein', hasher='argon2')
            attr_value = hasher.safe_summary(encoded)[summary_key]
            self.assertEqual(attr_value, new_value)

            state = {'upgraded': False}

            def setter(password):
                state['upgraded'] = True

            # No upgrade is triggered.
            self.assertTrue(check_password('letmein', encoded, setter, 'argon2'))
            self.assertFalse(state['upgraded'])

            # Revert to the old rounds count and ...
            setattr(hasher, attr, old_value)

            # ... check if the password would get updated to the new count.
            self.assertTrue(check_password('letmein', encoded, setter, 'argon2'))
            self.assertTrue(state['upgraded'])
        finally:
            setattr(hasher, attr, old_value)
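

# Illustrative sketch (not part of the test suite): the ``setter`` callbacks
# exercised above mirror how stored hashes get upgraded in real use. A
# stand-alone version, assuming a hypothetical ``user`` object that exposes
# ``password`` and ``save()``:
#
#     from django.contrib.auth.hashers import check_password, make_password
#
#     def check_and_upgrade(user, raw_password):
#         def setter(raw):
#             # Re-hash with the current default hasher and persist it.
#             user.password = make_password(raw)
#             user.save(update_fields=['password'])
#         # check_password() calls setter(raw_password) only when the
#         # password is correct and the stored hash needs updating.
#         return check_password(raw_password, user.password, setter)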
6ac926f8fd00a728c0306ef12cf958fd0df662f9da208937353fa4a88ded486a
import uuid

from django.core.exceptions import ImproperlyConfigured
from django.test import SimpleTestCase
from django.test.utils import override_settings
from django.urls import NoReverseMatch, Resolver404, path, resolve, reverse

from .converters import DynamicConverter
from .views import empty_view

included_kwargs = {'base': b'hello', 'value': b'world'}
converter_test_data = (
    # ('url', ('url_name', 'app_name', {kwargs})),
    # aGVsbG8= is 'hello' encoded in base64.
    ('/base64/aGVsbG8=/', ('base64', '', {'value': b'hello'})),
    ('/base64/aGVsbG8=/subpatterns/d29ybGQ=/', ('subpattern-base64', '', included_kwargs)),
    ('/base64/aGVsbG8=/namespaced/d29ybGQ=/', ('subpattern-base64', 'namespaced-base64', included_kwargs)),
)


@override_settings(ROOT_URLCONF='urlpatterns.path_urls')
class SimplifiedURLTests(SimpleTestCase):

    def test_path_lookup_without_parameters(self):
        match = resolve('/articles/2003/')
        self.assertEqual(match.url_name, 'articles-2003')
        self.assertEqual(match.args, ())
        self.assertEqual(match.kwargs, {})
        self.assertEqual(match.route, 'articles/2003/')

    def test_path_lookup_with_typed_parameters(self):
        match = resolve('/articles/2015/')
        self.assertEqual(match.url_name, 'articles-year')
        self.assertEqual(match.args, ())
        self.assertEqual(match.kwargs, {'year': 2015})
        self.assertEqual(match.route, 'articles/<int:year>/')

    def test_path_lookup_with_multiple_parameters(self):
        match = resolve('/articles/2015/04/12/')
        self.assertEqual(match.url_name, 'articles-year-month-day')
        self.assertEqual(match.args, ())
        self.assertEqual(match.kwargs, {'year': 2015, 'month': 4, 'day': 12})
        self.assertEqual(match.route, 'articles/<int:year>/<int:month>/<int:day>/')

    def test_two_variable_at_start_of_path_pattern(self):
        match = resolve('/en/foo/')
        self.assertEqual(match.url_name, 'lang-and-path')
        self.assertEqual(match.kwargs, {'lang': 'en', 'url': 'foo'})
        self.assertEqual(match.route, '<lang>/<path:url>/')

    def test_re_path(self):
        match = resolve('/regex/1/')
        self.assertEqual(match.url_name, 'regex')
        self.assertEqual(match.kwargs, {'pk': '1'})
        self.assertEqual(match.route, '^regex/(?P<pk>[0-9]+)/$')

    def test_re_path_with_optional_parameter(self):
        for url, kwargs in (
            ('/regex_optional/1/2/', {'arg1': '1', 'arg2': '2'}),
            ('/regex_optional/1/', {'arg1': '1'}),
        ):
            with self.subTest(url=url):
                match = resolve(url)
                self.assertEqual(match.url_name, 'regex_optional')
                self.assertEqual(match.kwargs, kwargs)
                self.assertEqual(
                    match.route,
                    r'^regex_optional/(?P<arg1>\d+)/(?:(?P<arg2>\d+)/)?',
                )

    def test_re_path_with_missing_optional_parameter(self):
        match = resolve('/regex_only_optional/')
        self.assertEqual(match.url_name, 'regex_only_optional')
        self.assertEqual(match.kwargs, {})
        self.assertEqual(match.args, ())
        self.assertEqual(
            match.route,
            r'^regex_only_optional/(?:(?P<arg1>\d+)/)?',
        )

    def test_path_lookup_with_inclusion(self):
        match = resolve('/included_urls/extra/something/')
        self.assertEqual(match.url_name, 'inner-extra')
        self.assertEqual(match.route, 'included_urls/extra/<extra>/')

    def test_path_lookup_with_empty_string_inclusion(self):
        match = resolve('/more/99/')
        self.assertEqual(match.url_name, 'inner-more')
        self.assertEqual(match.route, r'^more/(?P<extra>\w+)/$')

    def test_path_lookup_with_double_inclusion(self):
        match = resolve('/included_urls/more/some_value/')
        self.assertEqual(match.url_name, 'inner-more')
        self.assertEqual(match.route, r'included_urls/more/(?P<extra>\w+)/$')

    def test_path_reverse_without_parameter(self):
        url = reverse('articles-2003')
        self.assertEqual(url, '/articles/2003/')

    def test_path_reverse_with_parameter(self):
        url = reverse('articles-year-month-day', kwargs={'year': 2015, 'month': 4, 'day': 12})
        self.assertEqual(url, '/articles/2015/4/12/')

    @override_settings(ROOT_URLCONF='urlpatterns.path_base64_urls')
    def test_converter_resolve(self):
        for url, (url_name, app_name, kwargs) in converter_test_data:
            with self.subTest(url=url):
                match = resolve(url)
                self.assertEqual(match.url_name, url_name)
                self.assertEqual(match.app_name, app_name)
                self.assertEqual(match.kwargs, kwargs)

    @override_settings(ROOT_URLCONF='urlpatterns.path_base64_urls')
    def test_converter_reverse(self):
        for expected, (url_name, app_name, kwargs) in converter_test_data:
            if app_name:
                url_name = '%s:%s' % (app_name, url_name)
            with self.subTest(url=url_name):
                url = reverse(url_name, kwargs=kwargs)
                self.assertEqual(url, expected)

    @override_settings(ROOT_URLCONF='urlpatterns.path_base64_urls')
    def test_converter_reverse_with_second_layer_instance_namespace(self):
        kwargs = included_kwargs.copy()
        kwargs['last_value'] = b'world'
        url = reverse('instance-ns-base64:subsubpattern-base64', kwargs=kwargs)
        self.assertEqual(url, '/base64/aGVsbG8=/subpatterns/d29ybGQ=/d29ybGQ=/')

    def test_path_inclusion_is_matchable(self):
        match = resolve('/included_urls/extra/something/')
        self.assertEqual(match.url_name, 'inner-extra')
        self.assertEqual(match.kwargs, {'extra': 'something'})

    def test_path_inclusion_is_reversible(self):
        url = reverse('inner-extra', kwargs={'extra': 'something'})
        self.assertEqual(url, '/included_urls/extra/something/')

    def test_invalid_converter(self):
        msg = "URL route 'foo/<nonexistent:var>/' uses invalid converter 'nonexistent'."
        with self.assertRaisesMessage(ImproperlyConfigured, msg):
            path('foo/<nonexistent:var>/', empty_view)

    def test_space_in_route(self):
        msg = "URL route 'space/<int: num>' cannot contain whitespace."
        with self.assertRaisesMessage(ImproperlyConfigured, msg):
            path('space/<int: num>', empty_view)


@override_settings(ROOT_URLCONF='urlpatterns.converter_urls')
class ConverterTests(SimpleTestCase):

    def test_matching_urls(self):
        def no_converter(x):
            return x

        test_data = (
            ('int', {'0', '1', '01', 1234567890}, int),
            ('str', {'abcxyz'}, no_converter),
            ('path', {'allows.ANY*characters'}, no_converter),
            ('slug', {'abcxyz-ABCXYZ_01234567890'}, no_converter),
            ('uuid', {'39da9369-838e-4750-91a5-f7805cd82839'}, uuid.UUID),
        )
        for url_name, url_suffixes, converter in test_data:
            for url_suffix in url_suffixes:
                url = '/%s/%s/' % (url_name, url_suffix)
                with self.subTest(url=url):
                    match = resolve(url)
                    self.assertEqual(match.url_name, url_name)
                    self.assertEqual(match.kwargs, {url_name: converter(url_suffix)})
                    # reverse() works with string parameters.
                    string_kwargs = {url_name: url_suffix}
                    self.assertEqual(reverse(url_name, kwargs=string_kwargs), url)
                    # reverse() also works with native types (int, UUID, etc.).
                    if converter is not no_converter:
                        # The converted value might be different for int (a
                        # leading zero is lost in the conversion).
                        converted_value = match.kwargs[url_name]
                        converted_url = '/%s/%s/' % (url_name, converted_value)
                        self.assertEqual(reverse(url_name, kwargs={url_name: converted_value}), converted_url)

    def test_nonmatching_urls(self):
        test_data = (
            ('int', {'-1', 'letters'}),
            ('str', {'', '/'}),
            ('path', {''}),
            ('slug', {'', 'stars*notallowed'}),
            ('uuid', {
                '',
                '9da9369-838e-4750-91a5-f7805cd82839',
                '39da9369-838-4750-91a5-f7805cd82839',
                '39da9369-838e-475-91a5-f7805cd82839',
                '39da9369-838e-4750-91a-f7805cd82839',
                '39da9369-838e-4750-91a5-f7805cd8283',
            }),
        )
        for url_name, url_suffixes in test_data:
            for url_suffix in url_suffixes:
                url = '/%s/%s/' % (url_name, url_suffix)
                with self.subTest(url=url), self.assertRaises(Resolver404):
                    resolve(url)


@override_settings(ROOT_URLCONF='urlpatterns.path_same_name_urls')
class SameNameTests(SimpleTestCase):
    def test_matching_urls_same_name(self):
        @DynamicConverter.register_to_url
        def requires_tiny_int(value):
            if value > 5:
                raise ValueError
            return value

        tests = [
            ('number_of_args', [
                ([], {}, '0/'),
                ([1], {}, '1/1/'),
            ]),
            ('kwargs_names', [
                ([], {'a': 1}, 'a/1/'),
                ([], {'b': 1}, 'b/1/'),
            ]),
            ('converter', [
                (['a/b'], {}, 'path/a/b/'),
                (['a b'], {}, 'str/a%20b/'),
                (['a-b'], {}, 'slug/a-b/'),
                (['2'], {}, 'int/2/'),
                (
                    ['39da9369-838e-4750-91a5-f7805cd82839'],
                    {},
                    'uuid/39da9369-838e-4750-91a5-f7805cd82839/'
                ),
            ]),
            ('regex', [
                (['ABC'], {}, 'uppercase/ABC/'),
                (['abc'], {}, 'lowercase/abc/'),
            ]),
            ('converter_to_url', [
                ([6], {}, 'int/6/'),
                ([1], {}, 'tiny_int/1/'),
            ]),
        ]
        for url_name, cases in tests:
            for args, kwargs, url_suffix in cases:
                expected_url = '/%s/%s' % (url_name, url_suffix)
                with self.subTest(url=expected_url):
                    self.assertEqual(
                        reverse(url_name, args=args, kwargs=kwargs),
                        expected_url,
                    )


class ParameterRestrictionTests(SimpleTestCase):
    def test_non_identifier_parameter_name_causes_exception(self):
        msg = (
            "URL route 'hello/<int:1>/' uses parameter name '1' which isn't "
            "a valid Python identifier."
        )
        with self.assertRaisesMessage(ImproperlyConfigured, msg):
            path(r'hello/<int:1>/', lambda r: None)

    def test_allows_non_ascii_but_valid_identifiers(self):
        # \u0394 is "GREEK CAPITAL LETTER DELTA", a valid identifier.
        p = path('hello/<str:\u0394>/', lambda r: None)
        match = p.resolve('hello/1/')
        self.assertEqual(match.kwargs, {'\u0394': '1'})


@override_settings(ROOT_URLCONF='urlpatterns.path_dynamic_urls')
class ConversionExceptionTests(SimpleTestCase):
    """How are errors in Converter.to_python() and to_url() handled?"""

    def test_resolve_value_error_means_no_match(self):
        @DynamicConverter.register_to_python
        def raises_value_error(value):
            raise ValueError()
        with self.assertRaises(Resolver404):
            resolve('/dynamic/abc/')

    def test_resolve_type_error_propagates(self):
        @DynamicConverter.register_to_python
        def raises_type_error(value):
            raise TypeError('This type error propagates.')
        with self.assertRaisesMessage(TypeError, 'This type error propagates.'):
            resolve('/dynamic/abc/')

    def test_reverse_value_error_means_no_match(self):
        @DynamicConverter.register_to_url
        def raises_value_error(value):
            raise ValueError
        with self.assertRaises(NoReverseMatch):
            reverse('dynamic', kwargs={'value': object()})

    def test_reverse_type_error_propagates(self):
        @DynamicConverter.register_to_url
        def raises_type_error(value):
            raise TypeError('This type error propagates.')
        with self.assertRaisesMessage(TypeError, 'This type error propagates.'):
            reverse('dynamic', kwargs={'value': object()})
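

# For reference, a minimal sketch of the converter protocol exercised above
# (the class name is illustrative, not part of the suite): per django.urls,
# a ValueError from to_python() or to_url() means "no match", while any other
# exception propagates to the caller.
class EvenNumberConverterSketch:
    regex = '[0-9]+'

    def to_python(self, value):
        number = int(value)
        if number % 2:
            raise ValueError('odd values are treated as a non-match')
        return number

    def to_url(self, value):
        return str(value)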
c81e83bb71bc117ea6ccfdab247c5f8ff7043862b151ed151e619533ec6c58ab
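# URLconf exercised by SameNameTests: every pattern below deliberately reuses
# a name, so reverse() must disambiguate by argument count, keyword names,
# converter, or regex.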
from django.urls import path, re_path, register_converter

from . import converters, views

register_converter(converters.DynamicConverter, 'to_url_value_error')

urlpatterns = [
    # Different number of arguments.
    path('number_of_args/0/', views.empty_view, name='number_of_args'),
    path('number_of_args/1/<value>/', views.empty_view, name='number_of_args'),
    # Different names of the keyword arguments.
    path('kwargs_names/a/<a>/', views.empty_view, name='kwargs_names'),
    path('kwargs_names/b/<b>/', views.empty_view, name='kwargs_names'),
    # Different path converters.
    path('converter/path/<path:value>/', views.empty_view, name='converter'),
    path('converter/str/<str:value>/', views.empty_view, name='converter'),
    path('converter/slug/<slug:value>/', views.empty_view, name='converter'),
    path('converter/int/<int:value>/', views.empty_view, name='converter'),
    path('converter/uuid/<uuid:value>/', views.empty_view, name='converter'),
    # Different regular expressions.
    re_path(r'^regex/uppercase/([A-Z]+)/', views.empty_view, name='regex'),
    re_path(r'^regex/lowercase/([a-z]+)/', views.empty_view, name='regex'),
    # converter.to_url() raises ValueError (no match).
    path(
        'converter_to_url/int/<value>/',
        views.empty_view,
        name='converter_to_url',
    ),
    path(
        'converter_to_url/tiny_int/<to_url_value_error:value>/',
        views.empty_view,
        name='converter_to_url',
    ),
]
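
# Usage sketch (expected values taken from SameNameTests): with duplicated
# names, reverse() settles on the pattern whose arguments and converters
# accept the supplied values, e.g.
#   reverse('number_of_args') == '/number_of_args/0/'
#   reverse('number_of_args', args=[1]) == '/number_of_args/1/1/'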
da572fa836d36ead6ba54d280788c8ee0d2936098a523e827159e997d615d88f
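# Tests for QuerySet.aggregate() (one summary dict for the whole queryset)
# and QuerySet.annotate() (a computed value per row), including how they
# interact with values(), ordering, and GROUP BY.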
import datetime import re from decimal import Decimal from django.core.exceptions import FieldError from django.db import connection from django.db.models import ( Avg, Count, DecimalField, DurationField, F, FloatField, Func, IntegerField, Max, Min, Sum, Value, ) from django.db.models.expressions import Case, Exists, OuterRef, Subquery, When from django.test import TestCase from django.test.testcases import skipUnlessDBFeature from django.test.utils import Approximate, CaptureQueriesContext from django.utils import timezone from .models import Author, Book, Publisher, Store class AggregateTestCase(TestCase): @classmethod def setUpTestData(cls): cls.a1 = Author.objects.create(name='Adrian Holovaty', age=34) cls.a2 = Author.objects.create(name='Jacob Kaplan-Moss', age=35) cls.a3 = Author.objects.create(name='Brad Dayley', age=45) cls.a4 = Author.objects.create(name='James Bennett', age=29) cls.a5 = Author.objects.create(name='Jeffrey Forcier', age=37) cls.a6 = Author.objects.create(name='Paul Bissex', age=29) cls.a7 = Author.objects.create(name='Wesley J. Chun', age=25) cls.a8 = Author.objects.create(name='Peter Norvig', age=57) cls.a9 = Author.objects.create(name='Stuart Russell', age=46) cls.a1.friends.add(cls.a2, cls.a4) cls.a2.friends.add(cls.a1, cls.a7) cls.a4.friends.add(cls.a1) cls.a5.friends.add(cls.a6, cls.a7) cls.a6.friends.add(cls.a5, cls.a7) cls.a7.friends.add(cls.a2, cls.a5, cls.a6) cls.a8.friends.add(cls.a9) cls.a9.friends.add(cls.a8) cls.p1 = Publisher.objects.create(name='Apress', num_awards=3, duration=datetime.timedelta(days=1)) cls.p2 = Publisher.objects.create(name='Sams', num_awards=1, duration=datetime.timedelta(days=2)) cls.p3 = Publisher.objects.create(name='Prentice Hall', num_awards=7) cls.p4 = Publisher.objects.create(name='Morgan Kaufmann', num_awards=9) cls.p5 = Publisher.objects.create(name="Jonno's House of Books", num_awards=0) cls.b1 = Book.objects.create( isbn='159059725', name='The Definitive Guide to Django: Web Development Done Right', pages=447, rating=4.5, price=Decimal('30.00'), contact=cls.a1, publisher=cls.p1, pubdate=datetime.date(2007, 12, 6) ) cls.b2 = Book.objects.create( isbn='067232959', name='Sams Teach Yourself Django in 24 Hours', pages=528, rating=3.0, price=Decimal('23.09'), contact=cls.a3, publisher=cls.p2, pubdate=datetime.date(2008, 3, 3) ) cls.b3 = Book.objects.create( isbn='159059996', name='Practical Django Projects', pages=300, rating=4.0, price=Decimal('29.69'), contact=cls.a4, publisher=cls.p1, pubdate=datetime.date(2008, 6, 23) ) cls.b4 = Book.objects.create( isbn='013235613', name='Python Web Development with Django', pages=350, rating=4.0, price=Decimal('29.69'), contact=cls.a5, publisher=cls.p3, pubdate=datetime.date(2008, 11, 3) ) cls.b5 = Book.objects.create( isbn='013790395', name='Artificial Intelligence: A Modern Approach', pages=1132, rating=4.0, price=Decimal('82.80'), contact=cls.a8, publisher=cls.p3, pubdate=datetime.date(1995, 1, 15) ) cls.b6 = Book.objects.create( isbn='155860191', name='Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp', pages=946, rating=5.0, price=Decimal('75.00'), contact=cls.a8, publisher=cls.p4, pubdate=datetime.date(1991, 10, 15) ) cls.b1.authors.add(cls.a1, cls.a2) cls.b2.authors.add(cls.a3) cls.b3.authors.add(cls.a4) cls.b4.authors.add(cls.a5, cls.a6, cls.a7) cls.b5.authors.add(cls.a8, cls.a9) cls.b6.authors.add(cls.a8) s1 = Store.objects.create( name='Amazon.com', original_opening=datetime.datetime(1994, 4, 23, 9, 17, 42), friday_night_closing=datetime.time(23, 
59, 59) ) s2 = Store.objects.create( name='Books.com', original_opening=datetime.datetime(2001, 3, 15, 11, 23, 37), friday_night_closing=datetime.time(23, 59, 59) ) s3 = Store.objects.create( name="Mamma and Pappa's Books", original_opening=datetime.datetime(1945, 4, 25, 16, 24, 14), friday_night_closing=datetime.time(21, 30) ) s1.books.add(cls.b1, cls.b2, cls.b3, cls.b4, cls.b5, cls.b6) s2.books.add(cls.b1, cls.b3, cls.b5, cls.b6) s3.books.add(cls.b3, cls.b4, cls.b6) def test_empty_aggregate(self): self.assertEqual(Author.objects.all().aggregate(), {}) def test_aggregate_in_order_by(self): msg = ( 'Using an aggregate in order_by() without also including it in ' 'annotate() is not allowed: Avg(F(book__rating)' ) with self.assertRaisesMessage(FieldError, msg): Author.objects.values('age').order_by(Avg('book__rating')) def test_single_aggregate(self): vals = Author.objects.aggregate(Avg("age")) self.assertEqual(vals, {"age__avg": Approximate(37.4, places=1)}) def test_multiple_aggregates(self): vals = Author.objects.aggregate(Sum("age"), Avg("age")) self.assertEqual(vals, {"age__sum": 337, "age__avg": Approximate(37.4, places=1)}) def test_filter_aggregate(self): vals = Author.objects.filter(age__gt=29).aggregate(Sum("age")) self.assertEqual(vals, {'age__sum': 254}) def test_related_aggregate(self): vals = Author.objects.aggregate(Avg("friends__age")) self.assertEqual(vals, {'friends__age__avg': Approximate(34.07, places=2)}) vals = Book.objects.filter(rating__lt=4.5).aggregate(Avg("authors__age")) self.assertEqual(vals, {'authors__age__avg': Approximate(38.2857, places=2)}) vals = Author.objects.all().filter(name__contains="a").aggregate(Avg("book__rating")) self.assertEqual(vals, {'book__rating__avg': 4.0}) vals = Book.objects.aggregate(Sum("publisher__num_awards")) self.assertEqual(vals, {'publisher__num_awards__sum': 30}) vals = Publisher.objects.aggregate(Sum("book__price")) self.assertEqual(vals, {'book__price__sum': Decimal('270.27')}) def test_aggregate_multi_join(self): vals = Store.objects.aggregate(Max("books__authors__age")) self.assertEqual(vals, {'books__authors__age__max': 57}) vals = Author.objects.aggregate(Min("book__publisher__num_awards")) self.assertEqual(vals, {'book__publisher__num_awards__min': 1}) def test_aggregate_alias(self): vals = Store.objects.filter(name="Amazon.com").aggregate(amazon_mean=Avg("books__rating")) self.assertEqual(vals, {'amazon_mean': Approximate(4.08, places=2)}) def test_annotate_basic(self): self.assertQuerysetEqual( Book.objects.annotate().order_by('pk'), [ "The Definitive Guide to Django: Web Development Done Right", "Sams Teach Yourself Django in 24 Hours", "Practical Django Projects", "Python Web Development with Django", "Artificial Intelligence: A Modern Approach", "Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp" ], lambda b: b.name ) books = Book.objects.annotate(mean_age=Avg("authors__age")) b = books.get(pk=self.b1.pk) self.assertEqual( b.name, 'The Definitive Guide to Django: Web Development Done Right' ) self.assertEqual(b.mean_age, 34.5) def test_annotate_defer(self): qs = Book.objects.annotate( page_sum=Sum("pages")).defer('name').filter(pk=self.b1.pk) rows = [ (self.b1.id, "159059725", 447, "The Definitive Guide to Django: Web Development Done Right") ] self.assertQuerysetEqual( qs.order_by('pk'), rows, lambda r: (r.id, r.isbn, r.page_sum, r.name) ) def test_annotate_defer_select_related(self): qs = Book.objects.select_related('contact').annotate( 
page_sum=Sum("pages")).defer('name').filter(pk=self.b1.pk) rows = [ (self.b1.id, "159059725", 447, "Adrian Holovaty", "The Definitive Guide to Django: Web Development Done Right") ] self.assertQuerysetEqual( qs.order_by('pk'), rows, lambda r: (r.id, r.isbn, r.page_sum, r.contact.name, r.name) ) def test_annotate_m2m(self): books = Book.objects.filter(rating__lt=4.5).annotate(Avg("authors__age")).order_by("name") self.assertQuerysetEqual( books, [ ('Artificial Intelligence: A Modern Approach', 51.5), ('Practical Django Projects', 29.0), ('Python Web Development with Django', Approximate(30.3, places=1)), ('Sams Teach Yourself Django in 24 Hours', 45.0) ], lambda b: (b.name, b.authors__age__avg), ) books = Book.objects.annotate(num_authors=Count("authors")).order_by("name") self.assertQuerysetEqual( books, [ ('Artificial Intelligence: A Modern Approach', 2), ('Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp', 1), ('Practical Django Projects', 1), ('Python Web Development with Django', 3), ('Sams Teach Yourself Django in 24 Hours', 1), ('The Definitive Guide to Django: Web Development Done Right', 2) ], lambda b: (b.name, b.num_authors) ) def test_backwards_m2m_annotate(self): authors = Author.objects.filter(name__contains="a").annotate(Avg("book__rating")).order_by("name") self.assertQuerysetEqual( authors, [ ('Adrian Holovaty', 4.5), ('Brad Dayley', 3.0), ('Jacob Kaplan-Moss', 4.5), ('James Bennett', 4.0), ('Paul Bissex', 4.0), ('Stuart Russell', 4.0) ], lambda a: (a.name, a.book__rating__avg) ) authors = Author.objects.annotate(num_books=Count("book")).order_by("name") self.assertQuerysetEqual( authors, [ ('Adrian Holovaty', 1), ('Brad Dayley', 1), ('Jacob Kaplan-Moss', 1), ('James Bennett', 1), ('Jeffrey Forcier', 1), ('Paul Bissex', 1), ('Peter Norvig', 2), ('Stuart Russell', 1), ('Wesley J. 
Chun', 1) ], lambda a: (a.name, a.num_books) ) def test_reverse_fkey_annotate(self): books = Book.objects.annotate(Sum("publisher__num_awards")).order_by("name") self.assertQuerysetEqual( books, [ ('Artificial Intelligence: A Modern Approach', 7), ('Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp', 9), ('Practical Django Projects', 3), ('Python Web Development with Django', 7), ('Sams Teach Yourself Django in 24 Hours', 1), ('The Definitive Guide to Django: Web Development Done Right', 3) ], lambda b: (b.name, b.publisher__num_awards__sum) ) publishers = Publisher.objects.annotate(Sum("book__price")).order_by("name") self.assertQuerysetEqual( publishers, [ ('Apress', Decimal("59.69")), ("Jonno's House of Books", None), ('Morgan Kaufmann', Decimal("75.00")), ('Prentice Hall', Decimal("112.49")), ('Sams', Decimal("23.09")) ], lambda p: (p.name, p.book__price__sum) ) def test_annotate_values(self): books = list(Book.objects.filter(pk=self.b1.pk).annotate(mean_age=Avg("authors__age")).values()) self.assertEqual( books, [ { "contact_id": self.a1.id, "id": self.b1.id, "isbn": "159059725", "mean_age": 34.5, "name": "The Definitive Guide to Django: Web Development Done Right", "pages": 447, "price": Approximate(Decimal("30")), "pubdate": datetime.date(2007, 12, 6), "publisher_id": self.p1.id, "rating": 4.5, } ] ) books = ( Book.objects .filter(pk=self.b1.pk) .annotate(mean_age=Avg('authors__age')) .values('pk', 'isbn', 'mean_age') ) self.assertEqual( list(books), [ { "pk": self.b1.pk, "isbn": "159059725", "mean_age": 34.5, } ] ) books = Book.objects.filter(pk=self.b1.pk).annotate(mean_age=Avg("authors__age")).values("name") self.assertEqual( list(books), [{'name': 'The Definitive Guide to Django: Web Development Done Right'}], ) books = Book.objects.filter(pk=self.b1.pk).values().annotate(mean_age=Avg('authors__age')) self.assertEqual( list(books), [ { "contact_id": self.a1.id, "id": self.b1.id, "isbn": "159059725", "mean_age": 34.5, "name": "The Definitive Guide to Django: Web Development Done Right", "pages": 447, "price": Approximate(Decimal("30")), "pubdate": datetime.date(2007, 12, 6), "publisher_id": self.p1.id, "rating": 4.5, } ] ) books = ( Book.objects .values("rating") .annotate(n_authors=Count("authors__id"), mean_age=Avg("authors__age")) .order_by("rating") ) self.assertEqual( list(books), [ { "rating": 3.0, "n_authors": 1, "mean_age": 45.0, }, { "rating": 4.0, "n_authors": 6, "mean_age": Approximate(37.16, places=1) }, { "rating": 4.5, "n_authors": 2, "mean_age": 34.5, }, { "rating": 5.0, "n_authors": 1, "mean_age": 57.0, } ] ) authors = Author.objects.annotate(Avg("friends__age")).order_by("name") self.assertQuerysetEqual( authors, [ ('Adrian Holovaty', 32.0), ('Brad Dayley', None), ('Jacob Kaplan-Moss', 29.5), ('James Bennett', 34.0), ('Jeffrey Forcier', 27.0), ('Paul Bissex', 31.0), ('Peter Norvig', 46.0), ('Stuart Russell', 57.0), ('Wesley J. 
Chun', Approximate(33.66, places=1)) ], lambda a: (a.name, a.friends__age__avg) ) def test_count(self): vals = Book.objects.aggregate(Count("rating")) self.assertEqual(vals, {"rating__count": 6}) def test_count_star(self): with self.assertNumQueries(1) as ctx: Book.objects.aggregate(n=Count("*")) sql = ctx.captured_queries[0]['sql'] self.assertIn('SELECT COUNT(*) ', sql) def test_count_distinct_expression(self): aggs = Book.objects.aggregate( distinct_ratings=Count(Case(When(pages__gt=300, then='rating')), distinct=True), ) self.assertEqual(aggs['distinct_ratings'], 4) def test_distinct_on_aggregate(self): for aggregate, expected_result in ( (Avg, 4.125), (Count, 4), (Sum, 16.5), ): with self.subTest(aggregate=aggregate.__name__): books = Book.objects.aggregate(ratings=aggregate('rating', distinct=True)) self.assertEqual(books['ratings'], expected_result) def test_non_grouped_annotation_not_in_group_by(self): """ An annotation not included in values() before an aggregate should be excluded from the group by clause. """ qs = ( Book.objects.annotate(xprice=F('price')).filter(rating=4.0).values('rating') .annotate(count=Count('publisher_id', distinct=True)).values('count', 'rating').order_by('count') ) self.assertEqual(list(qs), [{'rating': 4.0, 'count': 2}]) def test_grouped_annotation_in_group_by(self): """ An annotation included in values() before an aggregate should be included in the group by clause. """ qs = ( Book.objects.annotate(xprice=F('price')).filter(rating=4.0).values('rating', 'xprice') .annotate(count=Count('publisher_id', distinct=True)).values('count', 'rating').order_by('count') ) self.assertEqual( list(qs), [ {'rating': 4.0, 'count': 1}, {'rating': 4.0, 'count': 2}, ] ) def test_fkey_aggregate(self): explicit = list(Author.objects.annotate(Count('book__id'))) implicit = list(Author.objects.annotate(Count('book'))) self.assertCountEqual(explicit, implicit) def test_annotate_ordering(self): books = Book.objects.values('rating').annotate(oldest=Max('authors__age')).order_by('oldest', 'rating') self.assertEqual( list(books), [ {'rating': 4.5, 'oldest': 35}, {'rating': 3.0, 'oldest': 45}, {'rating': 4.0, 'oldest': 57}, {'rating': 5.0, 'oldest': 57}, ] ) books = Book.objects.values("rating").annotate(oldest=Max("authors__age")).order_by("-oldest", "-rating") self.assertEqual( list(books), [ {'rating': 5.0, 'oldest': 57}, {'rating': 4.0, 'oldest': 57}, {'rating': 3.0, 'oldest': 45}, {'rating': 4.5, 'oldest': 35}, ] ) def test_aggregate_annotation(self): vals = Book.objects.annotate(num_authors=Count("authors__id")).aggregate(Avg("num_authors")) self.assertEqual(vals, {"num_authors__avg": Approximate(1.66, places=1)}) def test_avg_duration_field(self): # Explicit `output_field`. self.assertEqual( Publisher.objects.aggregate(Avg('duration', output_field=DurationField())), {'duration__avg': datetime.timedelta(days=1, hours=12)} ) # Implicit `output_field`. self.assertEqual( Publisher.objects.aggregate(Avg('duration')), {'duration__avg': datetime.timedelta(days=1, hours=12)} ) def test_sum_duration_field(self): self.assertEqual( Publisher.objects.aggregate(Sum('duration', output_field=DurationField())), {'duration__sum': datetime.timedelta(days=3)} ) def test_sum_distinct_aggregate(self): """ Sum on a distinct() QuerySet should aggregate only the distinct items. 
""" authors = Author.objects.filter(book__in=[self.b5, self.b6]) self.assertEqual(authors.count(), 3) distinct_authors = authors.distinct() self.assertEqual(distinct_authors.count(), 2) # Selected author ages are 57 and 46 age_sum = distinct_authors.aggregate(Sum('age')) self.assertEqual(age_sum['age__sum'], 103) def test_filtering(self): p = Publisher.objects.create(name='Expensive Publisher', num_awards=0) Book.objects.create( name='ExpensiveBook1', pages=1, isbn='111', rating=3.5, price=Decimal("1000"), publisher=p, contact_id=self.a1.id, pubdate=datetime.date(2008, 12, 1) ) Book.objects.create( name='ExpensiveBook2', pages=1, isbn='222', rating=4.0, price=Decimal("1000"), publisher=p, contact_id=self.a1.id, pubdate=datetime.date(2008, 12, 2) ) Book.objects.create( name='ExpensiveBook3', pages=1, isbn='333', rating=4.5, price=Decimal("35"), publisher=p, contact_id=self.a1.id, pubdate=datetime.date(2008, 12, 3) ) publishers = Publisher.objects.annotate(num_books=Count("book__id")).filter(num_books__gt=1).order_by("pk") self.assertQuerysetEqual( publishers, ['Apress', 'Prentice Hall', 'Expensive Publisher'], lambda p: p.name, ) publishers = Publisher.objects.filter(book__price__lt=Decimal("40.0")).order_by("pk") self.assertQuerysetEqual( publishers, [ "Apress", "Apress", "Sams", "Prentice Hall", "Expensive Publisher", ], lambda p: p.name ) publishers = ( Publisher.objects .annotate(num_books=Count("book__id")) .filter(num_books__gt=1, book__price__lt=Decimal("40.0")) .order_by("pk") ) self.assertQuerysetEqual( publishers, ['Apress', 'Prentice Hall', 'Expensive Publisher'], lambda p: p.name, ) publishers = ( Publisher.objects .filter(book__price__lt=Decimal("40.0")) .annotate(num_books=Count("book__id")) .filter(num_books__gt=1) .order_by("pk") ) self.assertQuerysetEqual(publishers, ['Apress'], lambda p: p.name) publishers = Publisher.objects.annotate(num_books=Count("book")).filter(num_books__range=[1, 3]).order_by("pk") self.assertQuerysetEqual( publishers, [ "Apress", "Sams", "Prentice Hall", "Morgan Kaufmann", "Expensive Publisher", ], lambda p: p.name ) publishers = Publisher.objects.annotate(num_books=Count("book")).filter(num_books__range=[1, 2]).order_by("pk") self.assertQuerysetEqual( publishers, ['Apress', 'Sams', 'Prentice Hall', 'Morgan Kaufmann'], lambda p: p.name ) publishers = Publisher.objects.annotate(num_books=Count("book")).filter(num_books__in=[1, 3]).order_by("pk") self.assertQuerysetEqual( publishers, ['Sams', 'Morgan Kaufmann', 'Expensive Publisher'], lambda p: p.name, ) publishers = Publisher.objects.annotate(num_books=Count("book")).filter(num_books__isnull=True) self.assertEqual(len(publishers), 0) def test_annotation(self): vals = Author.objects.filter(pk=self.a1.pk).aggregate(Count("friends__id")) self.assertEqual(vals, {"friends__id__count": 2}) books = Book.objects.annotate(num_authors=Count("authors__name")).filter(num_authors__exact=2).order_by("pk") self.assertQuerysetEqual( books, [ "The Definitive Guide to Django: Web Development Done Right", "Artificial Intelligence: A Modern Approach", ], lambda b: b.name ) authors = ( Author.objects .annotate(num_friends=Count("friends__id", distinct=True)) .filter(num_friends=0) .order_by("pk") ) self.assertQuerysetEqual(authors, ['Brad Dayley'], lambda a: a.name) publishers = Publisher.objects.annotate(num_books=Count("book__id")).filter(num_books__gt=1).order_by("pk") self.assertQuerysetEqual(publishers, ['Apress', 'Prentice Hall'], lambda p: p.name) publishers = ( Publisher.objects 
.filter(book__price__lt=Decimal("40.0")) .annotate(num_books=Count("book__id")) .filter(num_books__gt=1) ) self.assertQuerysetEqual(publishers, ['Apress'], lambda p: p.name) books = ( Book.objects .annotate(num_authors=Count("authors__id")) .filter(authors__name__contains="Norvig", num_authors__gt=1) ) self.assertQuerysetEqual( books, ['Artificial Intelligence: A Modern Approach'], lambda b: b.name ) def test_more_aggregation(self): a = Author.objects.get(name__contains='Norvig') b = Book.objects.get(name__contains='Done Right') b.authors.add(a) b.save() vals = ( Book.objects .annotate(num_authors=Count("authors__id")) .filter(authors__name__contains="Norvig", num_authors__gt=1) .aggregate(Avg("rating")) ) self.assertEqual(vals, {"rating__avg": 4.25}) def test_even_more_aggregate(self): publishers = Publisher.objects.annotate( earliest_book=Min("book__pubdate"), ).exclude(earliest_book=None).order_by("earliest_book").values( 'earliest_book', 'num_awards', 'id', 'name', ) self.assertEqual( list(publishers), [ { 'earliest_book': datetime.date(1991, 10, 15), 'num_awards': 9, 'id': self.p4.id, 'name': 'Morgan Kaufmann' }, { 'earliest_book': datetime.date(1995, 1, 15), 'num_awards': 7, 'id': self.p3.id, 'name': 'Prentice Hall' }, { 'earliest_book': datetime.date(2007, 12, 6), 'num_awards': 3, 'id': self.p1.id, 'name': 'Apress' }, { 'earliest_book': datetime.date(2008, 3, 3), 'num_awards': 1, 'id': self.p2.id, 'name': 'Sams' } ] ) vals = Store.objects.aggregate(Max("friday_night_closing"), Min("original_opening")) self.assertEqual( vals, { "friday_night_closing__max": datetime.time(23, 59, 59), "original_opening__min": datetime.datetime(1945, 4, 25, 16, 24, 14), } ) def test_annotate_values_list(self): books = ( Book.objects .filter(pk=self.b1.pk) .annotate(mean_age=Avg("authors__age")) .values_list("pk", "isbn", "mean_age") ) self.assertEqual(list(books), [(self.b1.id, '159059725', 34.5)]) books = Book.objects.filter(pk=self.b1.pk).annotate(mean_age=Avg("authors__age")).values_list("isbn") self.assertEqual(list(books), [('159059725',)]) books = Book.objects.filter(pk=self.b1.pk).annotate(mean_age=Avg("authors__age")).values_list("mean_age") self.assertEqual(list(books), [(34.5,)]) books = ( Book.objects .filter(pk=self.b1.pk) .annotate(mean_age=Avg("authors__age")) .values_list("mean_age", flat=True) ) self.assertEqual(list(books), [34.5]) books = Book.objects.values_list("price").annotate(count=Count("price")).order_by("-count", "price") self.assertEqual( list(books), [ (Decimal("29.69"), 2), (Decimal('23.09'), 1), (Decimal('30'), 1), (Decimal('75'), 1), (Decimal('82.8'), 1), ] ) def test_dates_with_aggregation(self): """ .dates() returns a distinct set of dates when applied to a QuerySet with aggregation. Refs #18056. Previously, .dates() would return distinct (date_kind, aggregation) sets, in this case (year, num_authors), so 2008 would be returned twice because there are books from 2008 with a different number of authors. 
""" dates = Book.objects.annotate(num_authors=Count("authors")).dates('pubdate', 'year') self.assertQuerysetEqual( dates, [ "datetime.date(1991, 1, 1)", "datetime.date(1995, 1, 1)", "datetime.date(2007, 1, 1)", "datetime.date(2008, 1, 1)" ] ) def test_values_aggregation(self): # Refs #20782 max_rating = Book.objects.values('rating').aggregate(max_rating=Max('rating')) self.assertEqual(max_rating['max_rating'], 5) max_books_per_rating = Book.objects.values('rating').annotate( books_per_rating=Count('id') ).aggregate(Max('books_per_rating')) self.assertEqual( max_books_per_rating, {'books_per_rating__max': 3}) def test_ticket17424(self): """ Doing exclude() on a foreign model after annotate() doesn't crash. """ all_books = list(Book.objects.values_list('pk', flat=True).order_by('pk')) annotated_books = Book.objects.order_by('pk').annotate(one=Count("id")) # The value doesn't matter, we just need any negative # constraint on a related model that's a noop. excluded_books = annotated_books.exclude(publisher__name="__UNLIKELY_VALUE__") # Try to generate query tree str(excluded_books.query) self.assertQuerysetEqual(excluded_books, all_books, lambda x: x.pk) # Check internal state self.assertIsNone(annotated_books.query.alias_map["aggregation_book"].join_type) self.assertIsNone(excluded_books.query.alias_map["aggregation_book"].join_type) def test_ticket12886(self): """ Aggregation over sliced queryset works correctly. """ qs = Book.objects.all().order_by('-rating')[0:3] vals = qs.aggregate(average_top3_rating=Avg('rating'))['average_top3_rating'] self.assertAlmostEqual(vals, 4.5, places=2) def test_ticket11881(self): """ Subqueries do not needlessly contain ORDER BY, SELECT FOR UPDATE or select_related() stuff. """ qs = Book.objects.all().select_for_update().order_by( 'pk').select_related('publisher').annotate(max_pk=Max('pk')) with CaptureQueriesContext(connection) as captured_queries: qs.aggregate(avg_pk=Avg('max_pk')) self.assertEqual(len(captured_queries), 1) qstr = captured_queries[0]['sql'].lower() self.assertNotIn('for update', qstr) forced_ordering = connection.ops.force_no_ordering() if forced_ordering: # If the backend needs to force an ordering we make sure it's # the only "ORDER BY" clause present in the query. 
self.assertEqual( re.findall(r'order by (\w+)', qstr), [', '.join(f[1][0] for f in forced_ordering).lower()] ) else: self.assertNotIn('order by', qstr) self.assertEqual(qstr.count(' join '), 0) def test_decimal_max_digits_has_no_effect(self): Book.objects.all().delete() a1 = Author.objects.first() p1 = Publisher.objects.first() thedate = timezone.now() for i in range(10): Book.objects.create( isbn="abcde{}".format(i), name="none", pages=10, rating=4.0, price=9999.98, contact=a1, publisher=p1, pubdate=thedate) book = Book.objects.aggregate(price_sum=Sum('price')) self.assertEqual(book['price_sum'], Decimal("99999.80")) def test_nonaggregate_aggregation_throws(self): with self.assertRaisesMessage(TypeError, 'fail is not an aggregate expression'): Book.objects.aggregate(fail=F('price')) def test_nonfield_annotation(self): book = Book.objects.annotate(val=Max(Value(2, output_field=IntegerField()))).first() self.assertEqual(book.val, 2) book = Book.objects.annotate(val=Max(Value(2), output_field=IntegerField())).first() self.assertEqual(book.val, 2) book = Book.objects.annotate(val=Max(2, output_field=IntegerField())).first() self.assertEqual(book.val, 2) def test_missing_output_field_raises_error(self): with self.assertRaisesMessage(FieldError, 'Cannot resolve expression type, unknown output_field'): Book.objects.annotate(val=Max(2)).first() def test_annotation_expressions(self): authors = Author.objects.annotate(combined_ages=Sum(F('age') + F('friends__age'))).order_by('name') authors2 = Author.objects.annotate(combined_ages=Sum('age') + Sum('friends__age')).order_by('name') for qs in (authors, authors2): self.assertQuerysetEqual( qs, [ ('Adrian Holovaty', 132), ('Brad Dayley', None), ('Jacob Kaplan-Moss', 129), ('James Bennett', 63), ('Jeffrey Forcier', 128), ('Paul Bissex', 120), ('Peter Norvig', 103), ('Stuart Russell', 103), ('Wesley J. Chun', 176) ], lambda a: (a.name, a.combined_ages) ) def test_aggregation_expressions(self): a1 = Author.objects.aggregate(av_age=Sum('age') / Count('*')) a2 = Author.objects.aggregate(av_age=Sum('age') / Count('age')) a3 = Author.objects.aggregate(av_age=Avg('age')) self.assertEqual(a1, {'av_age': 37}) self.assertEqual(a2, {'av_age': 37}) self.assertEqual(a3, {'av_age': Approximate(37.4, places=1)}) def test_avg_decimal_field(self): v = Book.objects.filter(rating=4).aggregate(avg_price=(Avg('price')))['avg_price'] self.assertIsInstance(v, Decimal) self.assertEqual(v, Approximate(Decimal('47.39'), places=2)) def test_order_of_precedence(self): p1 = Book.objects.filter(rating=4).aggregate(avg_price=(Avg('price') + 2) * 3) self.assertEqual(p1, {'avg_price': Approximate(Decimal('148.18'), places=2)}) p2 = Book.objects.filter(rating=4).aggregate(avg_price=Avg('price') + 2 * 3) self.assertEqual(p2, {'avg_price': Approximate(Decimal('53.39'), places=2)}) def test_combine_different_types(self): msg = ( 'Expression contains mixed types: FloatField, IntegerField. ' 'You must set output_field.' 
) qs = Book.objects.annotate(sums=Sum('rating') + Sum('pages') + Sum('price')) with self.assertRaisesMessage(FieldError, msg): qs.first() with self.assertRaisesMessage(FieldError, msg): qs.first() b1 = Book.objects.annotate(sums=Sum(F('rating') + F('pages') + F('price'), output_field=IntegerField())).get(pk=self.b4.pk) self.assertEqual(b1.sums, 383) b2 = Book.objects.annotate(sums=Sum(F('rating') + F('pages') + F('price'), output_field=FloatField())).get(pk=self.b4.pk) self.assertEqual(b2.sums, 383.69) b3 = Book.objects.annotate(sums=Sum(F('rating') + F('pages') + F('price'), output_field=DecimalField())).get(pk=self.b4.pk) self.assertEqual(b3.sums, Approximate(Decimal("383.69"), places=2)) def test_complex_aggregations_require_kwarg(self): with self.assertRaisesMessage(TypeError, 'Complex annotations require an alias'): Author.objects.annotate(Sum(F('age') + F('friends__age'))) with self.assertRaisesMessage(TypeError, 'Complex aggregates require an alias'): Author.objects.aggregate(Sum('age') / Count('age')) with self.assertRaisesMessage(TypeError, 'Complex aggregates require an alias'): Author.objects.aggregate(Sum(1)) def test_aggregate_over_complex_annotation(self): qs = Author.objects.annotate( combined_ages=Sum(F('age') + F('friends__age'))) age = qs.aggregate(max_combined_age=Max('combined_ages')) self.assertEqual(age['max_combined_age'], 176) age = qs.aggregate(max_combined_age_doubled=Max('combined_ages') * 2) self.assertEqual(age['max_combined_age_doubled'], 176 * 2) age = qs.aggregate( max_combined_age_doubled=Max('combined_ages') + Max('combined_ages')) self.assertEqual(age['max_combined_age_doubled'], 176 * 2) age = qs.aggregate( max_combined_age_doubled=Max('combined_ages') + Max('combined_ages'), sum_combined_age=Sum('combined_ages')) self.assertEqual(age['max_combined_age_doubled'], 176 * 2) self.assertEqual(age['sum_combined_age'], 954) age = qs.aggregate( max_combined_age_doubled=Max('combined_ages') + Max('combined_ages'), sum_combined_age_doubled=Sum('combined_ages') + Sum('combined_ages')) self.assertEqual(age['max_combined_age_doubled'], 176 * 2) self.assertEqual(age['sum_combined_age_doubled'], 954 * 2) def test_values_annotation_with_expression(self): # ensure the F() is promoted to the group by clause qs = Author.objects.values('name').annotate(another_age=Sum('age') + F('age')) a = qs.get(name="Adrian Holovaty") self.assertEqual(a['another_age'], 68) qs = qs.annotate(friend_count=Count('friends')) a = qs.get(name="Adrian Holovaty") self.assertEqual(a['friend_count'], 2) qs = qs.annotate(combined_age=Sum('age') + F('friends__age')).filter( name="Adrian Holovaty").order_by('-combined_age') self.assertEqual( list(qs), [ { "name": 'Adrian Holovaty', "another_age": 68, "friend_count": 1, "combined_age": 69 }, { "name": 'Adrian Holovaty', "another_age": 68, "friend_count": 1, "combined_age": 63 } ] ) vals = qs.values('name', 'combined_age') self.assertEqual( list(vals), [ {'name': 'Adrian Holovaty', 'combined_age': 69}, {'name': 'Adrian Holovaty', 'combined_age': 63}, ] ) def test_annotate_values_aggregate(self): alias_age = Author.objects.annotate( age_alias=F('age') ).values( 'age_alias', ).aggregate(sum_age=Sum('age_alias')) age = Author.objects.values('age').aggregate(sum_age=Sum('age')) self.assertEqual(alias_age['sum_age'], age['sum_age']) def test_annotate_over_annotate(self): author = Author.objects.annotate( age_alias=F('age') ).annotate( sum_age=Sum('age_alias') ).get(name="Adrian Holovaty") other_author = Author.objects.annotate( sum_age=Sum('age') 
).get(name="Adrian Holovaty") self.assertEqual(author.sum_age, other_author.sum_age) def test_annotated_aggregate_over_annotated_aggregate(self): with self.assertRaisesMessage(FieldError, "Cannot compute Sum('id__max'): 'id__max' is an aggregate"): Book.objects.annotate(Max('id')).annotate(Sum('id__max')) class MyMax(Max): def as_sql(self, compiler, connection): self.set_source_expressions(self.get_source_expressions()[0:1]) return super().as_sql(compiler, connection) with self.assertRaisesMessage(FieldError, "Cannot compute Max('id__max'): 'id__max' is an aggregate"): Book.objects.annotate(Max('id')).annotate(my_max=MyMax('id__max', 'price')) def test_multi_arg_aggregate(self): class MyMax(Max): output_field = DecimalField() def as_sql(self, compiler, connection): copy = self.copy() copy.set_source_expressions(copy.get_source_expressions()[0:1]) return super(MyMax, copy).as_sql(compiler, connection) with self.assertRaisesMessage(TypeError, 'Complex aggregates require an alias'): Book.objects.aggregate(MyMax('pages', 'price')) with self.assertRaisesMessage(TypeError, 'Complex annotations require an alias'): Book.objects.annotate(MyMax('pages', 'price')) Book.objects.aggregate(max_field=MyMax('pages', 'price')) def test_add_implementation(self): class MySum(Sum): pass # test completely changing how the output is rendered def lower_case_function_override(self, compiler, connection): sql, params = compiler.compile(self.source_expressions[0]) substitutions = {'function': self.function.lower(), 'expressions': sql, 'distinct': ''} substitutions.update(self.extra) return self.template % substitutions, params setattr(MySum, 'as_' + connection.vendor, lower_case_function_override) qs = Book.objects.annotate( sums=MySum(F('rating') + F('pages') + F('price'), output_field=IntegerField()) ) self.assertEqual(str(qs.query).count('sum('), 1) b1 = qs.get(pk=self.b4.pk) self.assertEqual(b1.sums, 383) # test changing the dict and delegating def lower_case_function_super(self, compiler, connection): self.extra['function'] = self.function.lower() return super(MySum, self).as_sql(compiler, connection) setattr(MySum, 'as_' + connection.vendor, lower_case_function_super) qs = Book.objects.annotate( sums=MySum(F('rating') + F('pages') + F('price'), output_field=IntegerField()) ) self.assertEqual(str(qs.query).count('sum('), 1) b1 = qs.get(pk=self.b4.pk) self.assertEqual(b1.sums, 383) # test overriding all parts of the template def be_evil(self, compiler, connection): substitutions = {'function': 'MAX', 'expressions': '2', 'distinct': ''} substitutions.update(self.extra) return self.template % substitutions, () setattr(MySum, 'as_' + connection.vendor, be_evil) qs = Book.objects.annotate( sums=MySum(F('rating') + F('pages') + F('price'), output_field=IntegerField()) ) self.assertEqual(str(qs.query).count('MAX('), 1) b1 = qs.get(pk=self.b4.pk) self.assertEqual(b1.sums, 2) def test_complex_values_aggregation(self): max_rating = Book.objects.values('rating').aggregate( double_max_rating=Max('rating') + Max('rating')) self.assertEqual(max_rating['double_max_rating'], 5 * 2) max_books_per_rating = Book.objects.values('rating').annotate( books_per_rating=Count('id') + 5 ).aggregate(Max('books_per_rating')) self.assertEqual( max_books_per_rating, {'books_per_rating__max': 3 + 5}) def test_expression_on_aggregation(self): # Create a plain expression class Greatest(Func): function = 'GREATEST' def as_sqlite(self, compiler, connection, **extra_context): return super().as_sql(compiler, connection, function='MAX', 
**extra_context) qs = Publisher.objects.annotate( price_or_median=Greatest(Avg('book__rating', output_field=DecimalField()), Avg('book__price')) ).filter(price_or_median__gte=F('num_awards')).order_by('num_awards') self.assertQuerysetEqual( qs, [1, 3, 7, 9], lambda v: v.num_awards) qs2 = Publisher.objects.annotate( rating_or_num_awards=Greatest(Avg('book__rating'), F('num_awards'), output_field=FloatField()) ).filter(rating_or_num_awards__gt=F('num_awards')).order_by('num_awards') self.assertQuerysetEqual( qs2, [1, 3], lambda v: v.num_awards) def test_arguments_must_be_expressions(self): msg = 'QuerySet.aggregate() received non-expression(s): %s.' with self.assertRaisesMessage(TypeError, msg % FloatField()): Book.objects.aggregate(FloatField()) with self.assertRaisesMessage(TypeError, msg % True): Book.objects.aggregate(is_book=True) with self.assertRaisesMessage(TypeError, msg % ', '.join([str(FloatField()), 'True'])): Book.objects.aggregate(FloatField(), Avg('price'), is_book=True) def test_aggregation_subquery_annotation(self): """Subquery annotations are excluded from the GROUP BY if they are not explicitly grouped against.""" latest_book_pubdate_qs = Book.objects.filter( publisher=OuterRef('pk') ).order_by('-pubdate').values('pubdate')[:1] publisher_qs = Publisher.objects.annotate( latest_book_pubdate=Subquery(latest_book_pubdate_qs), ).annotate(count=Count('book')) with self.assertNumQueries(1) as ctx: list(publisher_qs) self.assertEqual(ctx[0]['sql'].count('SELECT'), 2) # The GROUP BY should not be by alias either. self.assertEqual(ctx[0]['sql'].lower().count('latest_book_pubdate'), 1) def test_aggregation_subquery_annotation_exists(self): latest_book_pubdate_qs = Book.objects.filter( publisher=OuterRef('pk') ).order_by('-pubdate').values('pubdate')[:1] publisher_qs = Publisher.objects.annotate( latest_book_pubdate=Subquery(latest_book_pubdate_qs), count=Count('book'), ) self.assertTrue(publisher_qs.exists()) def test_aggregation_exists_annotation(self): published_books = Book.objects.filter(publisher=OuterRef('pk')) publisher_qs = Publisher.objects.annotate( published_book=Exists(published_books), count=Count('book'), ).values_list('name', flat=True) self.assertCountEqual(list(publisher_qs), [ 'Apress', 'Morgan Kaufmann', "Jonno's House of Books", 'Prentice Hall', 'Sams', ]) @skipUnlessDBFeature('supports_subqueries_in_group_by') def test_group_by_subquery_annotation(self): """ Subquery annotations are included in the GROUP BY if they are grouped against. """ long_books_count_qs = Book.objects.filter( publisher=OuterRef('pk'), pages__gt=400, ).values( 'publisher' ).annotate(count=Count('pk')).values('count') long_books_count_breakdown = Publisher.objects.values_list( Subquery(long_books_count_qs, IntegerField()), ).annotate(total=Count('*')) self.assertEqual(dict(long_books_count_breakdown), {None: 1, 1: 4}) @skipUnlessDBFeature('supports_subqueries_in_group_by') def test_group_by_exists_annotation(self): """ Exists annotations are included in the GROUP BY if they are grouped against. 
""" long_books_qs = Book.objects.filter( publisher=OuterRef('pk'), pages__gt=800, ) has_long_books_breakdown = Publisher.objects.values_list( Exists(long_books_qs), ).annotate(total=Count('*')) self.assertEqual(dict(has_long_books_breakdown), {True: 2, False: 3}) def test_aggregation_subquery_annotation_related_field(self): publisher = Publisher.objects.create(name=self.a9.name, num_awards=2) book = Book.objects.create( isbn='159059999', name='Test book.', pages=819, rating=2.5, price=Decimal('14.44'), contact=self.a9, publisher=publisher, pubdate=datetime.date(2019, 12, 6), ) book.authors.add(self.a5, self.a6, self.a7) books_qs = Book.objects.annotate( contact_publisher=Subquery( Publisher.objects.filter( pk=OuterRef('publisher'), name=OuterRef('contact__name'), ).values('name')[:1], ) ).filter( contact_publisher__isnull=False, ).annotate(count=Count('authors')) self.assertSequenceEqual(books_qs, [book])
4c5fd4a3dc6534e7c63dc8466d7e8422e3631ab3fbed6613f504d23ab38d3d01
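# Admin changelist filter tests: SimpleListFilter subclasses provide lookups()
# for the sidebar choices and queryset() to apply the selected value; plain
# field names and (field, FilterClass) tuples in list_filter map to the
# built-in FieldListFilter classes.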
import datetime import sys import unittest from django.contrib.admin import ( AllValuesFieldListFilter, BooleanFieldListFilter, EmptyFieldListFilter, ModelAdmin, RelatedOnlyFieldListFilter, SimpleListFilter, site, ) from django.contrib.admin.options import IncorrectLookupParameters from django.contrib.auth.admin import UserAdmin from django.contrib.auth.models import User from django.core.exceptions import ImproperlyConfigured from django.test import RequestFactory, TestCase, override_settings from .models import Book, Bookmark, Department, Employee, TaggedItem def select_by(dictlist, key, value): return [x for x in dictlist if x[key] == value][0] class DecadeListFilter(SimpleListFilter): def lookups(self, request, model_admin): return ( ('the 80s', "the 1980's"), ('the 90s', "the 1990's"), ('the 00s', "the 2000's"), ('other', "other decades"), ) def queryset(self, request, queryset): decade = self.value() if decade == 'the 80s': return queryset.filter(year__gte=1980, year__lte=1989) if decade == 'the 90s': return queryset.filter(year__gte=1990, year__lte=1999) if decade == 'the 00s': return queryset.filter(year__gte=2000, year__lte=2009) class NotNinetiesListFilter(SimpleListFilter): title = "Not nineties books" parameter_name = "book_year" def lookups(self, request, model_admin): return ( ('the 90s', "the 1990's"), ) def queryset(self, request, queryset): if self.value() == 'the 90s': return queryset.filter(year__gte=1990, year__lte=1999) else: return queryset.exclude(year__gte=1990, year__lte=1999) class DecadeListFilterWithTitleAndParameter(DecadeListFilter): title = 'publication decade' parameter_name = 'publication-decade' class DecadeListFilterWithoutTitle(DecadeListFilter): parameter_name = 'publication-decade' class DecadeListFilterWithoutParameter(DecadeListFilter): title = 'publication decade' class DecadeListFilterWithNoneReturningLookups(DecadeListFilterWithTitleAndParameter): def lookups(self, request, model_admin): pass class DecadeListFilterWithFailingQueryset(DecadeListFilterWithTitleAndParameter): def queryset(self, request, queryset): raise 1 / 0 class DecadeListFilterWithQuerysetBasedLookups(DecadeListFilterWithTitleAndParameter): def lookups(self, request, model_admin): qs = model_admin.get_queryset(request) if qs.filter(year__gte=1980, year__lte=1989).exists(): yield ('the 80s', "the 1980's") if qs.filter(year__gte=1990, year__lte=1999).exists(): yield ('the 90s', "the 1990's") if qs.filter(year__gte=2000, year__lte=2009).exists(): yield ('the 00s', "the 2000's") class DecadeListFilterParameterEndsWith__In(DecadeListFilter): title = 'publication decade' parameter_name = 'decade__in' # Ends with '__in" class DecadeListFilterParameterEndsWith__Isnull(DecadeListFilter): title = 'publication decade' parameter_name = 'decade__isnull' # Ends with '__isnull" class DepartmentListFilterLookupWithNonStringValue(SimpleListFilter): title = 'department' parameter_name = 'department' def lookups(self, request, model_admin): return sorted({ (employee.department.id, # Intentionally not a string (Refs #19318) employee.department.code) for employee in model_admin.get_queryset(request).all() }) def queryset(self, request, queryset): if self.value(): return queryset.filter(department__id=self.value()) class DepartmentListFilterLookupWithUnderscoredParameter(DepartmentListFilterLookupWithNonStringValue): parameter_name = 'department__whatever' class DepartmentListFilterLookupWithDynamicValue(DecadeListFilterWithTitleAndParameter): def lookups(self, request, model_admin): if self.value() 
== 'the 80s':
            return (('the 90s', "the 1990's"),)
        elif self.value() == 'the 90s':
            return (('the 80s', "the 1980's"),)
        else:
            return (
                ('the 80s', "the 1980's"),
                ('the 90s', "the 1990's"),
            )


class CustomUserAdmin(UserAdmin):
    list_filter = ('books_authored', 'books_contributed')


class BookAdmin(ModelAdmin):
    list_filter = ('year', 'author', 'contributors', 'is_best_seller', 'date_registered', 'no')
    ordering = ('-id',)


class BookAdmin2(ModelAdmin):
    list_filter = ('year', 'author', 'contributors', 'is_best_seller2', 'date_registered', 'no')


class BookAdminWithTupleBooleanFilter(BookAdmin):
    list_filter = (
        'year',
        'author',
        'contributors',
        ('is_best_seller', BooleanFieldListFilter),
        'date_registered',
        'no',
    )


class BookAdminWithUnderscoreLookupAndTuple(BookAdmin):
    list_filter = (
        'year',
        ('author__email', AllValuesFieldListFilter),
        'contributors',
        'is_best_seller',
        'date_registered',
        'no',
    )


class BookAdminWithCustomQueryset(ModelAdmin):
    def __init__(self, user, *args, **kwargs):
        self.user = user
        super().__init__(*args, **kwargs)

    list_filter = ('year',)

    def get_queryset(self, request):
        return super().get_queryset(request).filter(author=self.user)


class BookAdminRelatedOnlyFilter(ModelAdmin):
    list_filter = (
        'year', 'is_best_seller', 'date_registered', 'no',
        ('author', RelatedOnlyFieldListFilter),
        ('contributors', RelatedOnlyFieldListFilter),
        ('employee__department', RelatedOnlyFieldListFilter),
    )
    ordering = ('-id',)


class DecadeFilterBookAdmin(ModelAdmin):
    list_filter = ('author', DecadeListFilterWithTitleAndParameter)
    ordering = ('-id',)


class NotNinetiesListFilterAdmin(ModelAdmin):
    list_filter = (NotNinetiesListFilter,)


class DecadeFilterBookAdminWithoutTitle(ModelAdmin):
    list_filter = (DecadeListFilterWithoutTitle,)


class DecadeFilterBookAdminWithoutParameter(ModelAdmin):
    list_filter = (DecadeListFilterWithoutParameter,)


class DecadeFilterBookAdminWithNoneReturningLookups(ModelAdmin):
    list_filter = (DecadeListFilterWithNoneReturningLookups,)


class DecadeFilterBookAdminWithFailingQueryset(ModelAdmin):
    list_filter = (DecadeListFilterWithFailingQueryset,)


class DecadeFilterBookAdminWithQuerysetBasedLookups(ModelAdmin):
    list_filter = (DecadeListFilterWithQuerysetBasedLookups,)


class DecadeFilterBookAdminParameterEndsWith__In(ModelAdmin):
    list_filter = (DecadeListFilterParameterEndsWith__In,)


class DecadeFilterBookAdminParameterEndsWith__Isnull(ModelAdmin):
    list_filter = (DecadeListFilterParameterEndsWith__Isnull,)


class EmployeeAdmin(ModelAdmin):
    list_display = ['name', 'department']
    list_filter = ['department']


class DepartmentFilterEmployeeAdmin(EmployeeAdmin):
    list_filter = [DepartmentListFilterLookupWithNonStringValue]


class DepartmentFilterUnderscoredEmployeeAdmin(EmployeeAdmin):
    list_filter = [DepartmentListFilterLookupWithUnderscoredParameter]


class DepartmentFilterDynamicValueBookAdmin(EmployeeAdmin):
    list_filter = [DepartmentListFilterLookupWithDynamicValue]


class BookmarkAdminGenericRelation(ModelAdmin):
    list_filter = ['tags__tag']


class BookAdminWithEmptyFieldListFilter(ModelAdmin):
    list_filter = [
        ('author', EmptyFieldListFilter),
        ('title', EmptyFieldListFilter),
    ]


class DepartmentAdminWithEmptyFieldListFilter(ModelAdmin):
    list_filter = [('description', EmptyFieldListFilter)]


class ListFiltersTests(TestCase):
    request_factory = RequestFactory()

    @classmethod
    def setUpTestData(cls):
        cls.today = datetime.date.today()
        cls.tomorrow = cls.today + datetime.timedelta(days=1)
        cls.one_week_ago = cls.today - datetime.timedelta(days=7)
        if cls.today.month == 12:
            cls.next_month = cls.today.replace(year=cls.today.year + 1, month=1, day=1)
        else:
            cls.next_month = cls.today.replace(month=cls.today.month + 1, day=1)
        cls.next_year = cls.today.replace(year=cls.today.year + 1, month=1, day=1)

        # Users
        cls.alfred = User.objects.create_superuser('alfred', '[email protected]', 'password')
        cls.bob = User.objects.create_user('bob', '[email protected]')
        cls.lisa = User.objects.create_user('lisa', '[email protected]')

        # Books
        cls.djangonaut_book = Book.objects.create(
            title='Djangonaut: an art of living', year=2009,
            author=cls.alfred, is_best_seller=True, date_registered=cls.today,
            is_best_seller2=True,
        )
        cls.bio_book = Book.objects.create(
            title='Django: a biography', year=1999, author=cls.alfred,
            is_best_seller=False, no=207,
            is_best_seller2=False,
        )
        cls.django_book = Book.objects.create(
            title='The Django Book', year=None, author=cls.bob,
            is_best_seller=None, date_registered=cls.today, no=103,
            is_best_seller2=None,
        )
        cls.guitar_book = Book.objects.create(
            title='Guitar for dummies', year=2002, is_best_seller=True,
            date_registered=cls.one_week_ago,
            is_best_seller2=True,
        )
        cls.guitar_book.contributors.set([cls.bob, cls.lisa])

        # Departments
        cls.dev = Department.objects.create(code='DEV', description='Development')
        cls.design = Department.objects.create(code='DSN', description='Design')

        # Employees
        cls.john = Employee.objects.create(name='John Blue', department=cls.dev)
        cls.jack = Employee.objects.create(name='Jack Red', department=cls.design)

    def test_choicesfieldlistfilter_has_none_choice(self):
        """
        The last choice is for the None value.
        """
        class BookmarkChoicesAdmin(ModelAdmin):
            list_display = ['none_or_null']
            list_filter = ['none_or_null']

        modeladmin = BookmarkChoicesAdmin(Bookmark, site)
        request = self.request_factory.get('/', {})
        request.user = self.alfred
        changelist = modeladmin.get_changelist_instance(request)
        filterspec = changelist.get_filters(request)[0][0]
        choices = list(filterspec.choices(changelist))
        self.assertEqual(choices[-1]['display'], 'None')
        self.assertEqual(choices[-1]['query_string'], '?none_or_null__isnull=True')

    def test_datefieldlistfilter(self):
        modeladmin = BookAdmin(Book, site)

        request = self.request_factory.get('/')
        request.user = self.alfred
        changelist = modeladmin.get_changelist(request)

        request = self.request_factory.get('/', {
            'date_registered__gte': self.today,
            'date_registered__lt': self.tomorrow},
        )
        request.user = self.alfred
        changelist = modeladmin.get_changelist_instance(request)

        # Make sure the correct queryset is returned
        queryset = changelist.get_queryset(request)
        self.assertEqual(list(queryset), [self.django_book, self.djangonaut_book])

        # Make sure the correct choice is selected
        filterspec = changelist.get_filters(request)[0][4]
        self.assertEqual(filterspec.title, 'date registered')
        choice = select_by(filterspec.choices(changelist), "display", "Today")
        self.assertIs(choice['selected'], True)
        self.assertEqual(
            choice['query_string'],
            '?date_registered__gte=%s&date_registered__lt=%s' % (
                self.today,
                self.tomorrow,
            )
        )

        request = self.request_factory.get('/', {
            'date_registered__gte': self.today.replace(day=1),
            'date_registered__lt': self.next_month},
        )
        request.user = self.alfred
        changelist = modeladmin.get_changelist_instance(request)

        # Make sure the correct queryset is returned
        queryset = changelist.get_queryset(request)
        if (self.today.year, self.today.month) == (self.one_week_ago.year, self.one_week_ago.month):
            # In case one week ago is in the same month.
            self.assertEqual(list(queryset), [self.guitar_book, self.django_book, self.djangonaut_book])
        else:
            self.assertEqual(list(queryset), [self.django_book, self.djangonaut_book])

        # Make sure the correct choice is selected
        filterspec = changelist.get_filters(request)[0][4]
        self.assertEqual(filterspec.title, 'date registered')
        choice = select_by(filterspec.choices(changelist), "display", "This month")
        self.assertIs(choice['selected'], True)
        self.assertEqual(
            choice['query_string'],
            '?date_registered__gte=%s&date_registered__lt=%s' % (
                self.today.replace(day=1),
                self.next_month,
            )
        )

        request = self.request_factory.get('/', {
            'date_registered__gte': self.today.replace(month=1, day=1),
            'date_registered__lt': self.next_year},
        )
        request.user = self.alfred
        changelist = modeladmin.get_changelist_instance(request)

        # Make sure the correct queryset is returned
        queryset = changelist.get_queryset(request)
        if self.today.year == self.one_week_ago.year:
            # In case one week ago is in the same year.
            self.assertEqual(list(queryset), [self.guitar_book, self.django_book, self.djangonaut_book])
        else:
            self.assertEqual(list(queryset), [self.django_book, self.djangonaut_book])

        # Make sure the correct choice is selected
        filterspec = changelist.get_filters(request)[0][4]
        self.assertEqual(filterspec.title, 'date registered')
        choice = select_by(filterspec.choices(changelist), "display", "This year")
        self.assertIs(choice['selected'], True)
        self.assertEqual(
            choice['query_string'],
            '?date_registered__gte=%s&date_registered__lt=%s' % (
                self.today.replace(month=1, day=1),
                self.next_year,
            )
        )

        request = self.request_factory.get('/', {
            'date_registered__gte': str(self.one_week_ago),
            'date_registered__lt': str(self.tomorrow),
        })
        request.user = self.alfred
        changelist = modeladmin.get_changelist_instance(request)

        # Make sure the correct queryset is returned
        queryset = changelist.get_queryset(request)
        self.assertEqual(list(queryset), [self.guitar_book, self.django_book, self.djangonaut_book])

        # Make sure the correct choice is selected
        filterspec = changelist.get_filters(request)[0][4]
        self.assertEqual(filterspec.title, 'date registered')
        choice = select_by(filterspec.choices(changelist), "display", "Past 7 days")
        self.assertIs(choice['selected'], True)
        self.assertEqual(
            choice['query_string'],
            '?date_registered__gte=%s&date_registered__lt=%s' % (
                str(self.one_week_ago),
                str(self.tomorrow),
            )
        )

        # Null/not null queries
        request = self.request_factory.get('/', {'date_registered__isnull': 'True'})
        request.user = self.alfred
        changelist = modeladmin.get_changelist_instance(request)

        # Make sure the correct queryset is returned
        queryset = changelist.get_queryset(request)
        self.assertEqual(queryset.count(), 1)
        self.assertEqual(queryset[0], self.bio_book)

        # Make sure the correct choice is selected
        filterspec = changelist.get_filters(request)[0][4]
        self.assertEqual(filterspec.title, 'date registered')
        choice = select_by(filterspec.choices(changelist), 'display', 'No date')
        self.assertIs(choice['selected'], True)
        self.assertEqual(choice['query_string'], '?date_registered__isnull=True')

        request = self.request_factory.get('/', {'date_registered__isnull': 'False'})
        request.user = self.alfred
        changelist = modeladmin.get_changelist_instance(request)

        # Make sure the correct queryset is returned
        queryset = changelist.get_queryset(request)
        self.assertEqual(queryset.count(), 3)
        self.assertEqual(list(queryset), [self.guitar_book, self.django_book, self.djangonaut_book])

        # Make sure the correct choice is selected
        filterspec = changelist.get_filters(request)[0][4]
        self.assertEqual(filterspec.title, 'date registered')
        choice = select_by(filterspec.choices(changelist), 'display', 'Has date')
        self.assertIs(choice['selected'], True)
        self.assertEqual(choice['query_string'], '?date_registered__isnull=False')

    @unittest.skipIf(
        sys.platform == 'win32',
        "Windows doesn't support setting a timezone that differs from the "
        "system timezone."
    )
    @override_settings(USE_TZ=True)
    def test_datefieldlistfilter_with_time_zone_support(self):
        # Regression for #17830
        self.test_datefieldlistfilter()

    def test_allvaluesfieldlistfilter(self):
        modeladmin = BookAdmin(Book, site)

        request = self.request_factory.get('/', {'year__isnull': 'True'})
        request.user = self.alfred
        changelist = modeladmin.get_changelist_instance(request)

        # Make sure the correct queryset is returned
        queryset = changelist.get_queryset(request)
        self.assertEqual(list(queryset), [self.django_book])

        # Make sure the last choice is None and is selected
        filterspec = changelist.get_filters(request)[0][0]
        self.assertEqual(filterspec.title, 'year')
        choices = list(filterspec.choices(changelist))
        self.assertIs(choices[-1]['selected'], True)
        self.assertEqual(choices[-1]['query_string'], '?year__isnull=True')

        request = self.request_factory.get('/', {'year': '2002'})
        request.user = self.alfred
        changelist = modeladmin.get_changelist_instance(request)

        # Make sure the correct choice is selected
        filterspec = changelist.get_filters(request)[0][0]
        self.assertEqual(filterspec.title, 'year')
        choices = list(filterspec.choices(changelist))
        self.assertIs(choices[2]['selected'], True)
        self.assertEqual(choices[2]['query_string'], '?year=2002')

    def test_allvaluesfieldlistfilter_custom_qs(self):
        # Make sure that the correct filters are returned with custom querysets
        modeladmin = BookAdminWithCustomQueryset(self.alfred, Book, site)
        request = self.request_factory.get('/')
        request.user = self.alfred
        changelist = modeladmin.get_changelist_instance(request)

        filterspec = changelist.get_filters(request)[0][0]
        choices = list(filterspec.choices(changelist))
        # Should have 'All', 1999 and 2009 options, i.e. the subset of years of
        # books written by alfred (which is the filtering criterion set by
        # BookAdminWithCustomQueryset.get_queryset())
        self.assertEqual(3, len(choices))
        self.assertEqual(choices[0]['query_string'], '?')
        self.assertEqual(choices[1]['query_string'], '?year=1999')
        self.assertEqual(choices[2]['query_string'], '?year=2009')

    def test_relatedfieldlistfilter_foreignkey(self):
        modeladmin = BookAdmin(Book, site)

        request = self.request_factory.get('/')
        request.user = self.alfred
        changelist = modeladmin.get_changelist_instance(request)

        # Make sure that all users are present in the author's list filter
        filterspec = changelist.get_filters(request)[0][1]
        expected = [(self.alfred.pk, 'alfred'), (self.bob.pk, 'bob'), (self.lisa.pk, 'lisa')]
        self.assertEqual(sorted(filterspec.lookup_choices), sorted(expected))

        request = self.request_factory.get('/', {'author__isnull': 'True'})
        request.user = self.alfred
        changelist = modeladmin.get_changelist_instance(request)

        # Make sure the correct queryset is returned
        queryset = changelist.get_queryset(request)
        self.assertEqual(list(queryset), [self.guitar_book])

        # Make sure the last choice is None and is selected
        filterspec = changelist.get_filters(request)[0][1]
        self.assertEqual(filterspec.title, 'Verbose Author')
        choices = list(filterspec.choices(changelist))
        self.assertIs(choices[-1]['selected'], True)
        self.assertEqual(choices[-1]['query_string'], '?author__isnull=True')

        request = self.request_factory.get('/', {'author__id__exact': self.alfred.pk})
        request.user = self.alfred
        changelist = modeladmin.get_changelist_instance(request)

        # Make sure the correct choice is selected
        filterspec = changelist.get_filters(request)[0][1]
        self.assertEqual(filterspec.title, 'Verbose Author')
        # order of choices depends on User model, which has no order
        choice = select_by(filterspec.choices(changelist), "display", "alfred")
        self.assertIs(choice['selected'], True)
        self.assertEqual(choice['query_string'], '?author__id__exact=%d' % self.alfred.pk)

    def test_relatedfieldlistfilter_foreignkey_ordering(self):
        """RelatedFieldListFilter ordering respects ModelAdmin.ordering."""
        class EmployeeAdminWithOrdering(ModelAdmin):
            ordering = ('name',)

        class BookAdmin(ModelAdmin):
            list_filter = ('employee',)

        site.register(Employee, EmployeeAdminWithOrdering)
        self.addCleanup(lambda: site.unregister(Employee))
        modeladmin = BookAdmin(Book, site)

        request = self.request_factory.get('/')
        request.user = self.alfred
        changelist = modeladmin.get_changelist_instance(request)
        filterspec = changelist.get_filters(request)[0][0]
        expected = [(self.jack.pk, 'Jack Red'), (self.john.pk, 'John Blue')]
        self.assertEqual(filterspec.lookup_choices, expected)

    def test_relatedfieldlistfilter_foreignkey_ordering_reverse(self):
        class EmployeeAdminWithOrdering(ModelAdmin):
            ordering = ('-name',)

        class BookAdmin(ModelAdmin):
            list_filter = ('employee',)

        site.register(Employee, EmployeeAdminWithOrdering)
        self.addCleanup(lambda: site.unregister(Employee))
        modeladmin = BookAdmin(Book, site)

        request = self.request_factory.get('/')
        request.user = self.alfred
        changelist = modeladmin.get_changelist_instance(request)
        filterspec = changelist.get_filters(request)[0][0]
        expected = [(self.john.pk, 'John Blue'), (self.jack.pk, 'Jack Red')]
        self.assertEqual(filterspec.lookup_choices, expected)

    def test_relatedfieldlistfilter_foreignkey_default_ordering(self):
        """RelatedFieldListFilter ordering respects Model.ordering."""
        class BookAdmin(ModelAdmin):
            list_filter = ('employee',)

        self.addCleanup(setattr, Employee._meta, 'ordering', Employee._meta.ordering)
        Employee._meta.ordering = ('name',)
        modeladmin = BookAdmin(Book, site)

        request = self.request_factory.get('/')
        request.user = self.alfred
        changelist = modeladmin.get_changelist_instance(request)
        filterspec = changelist.get_filters(request)[0][0]
        expected = [(self.jack.pk, 'Jack Red'), (self.john.pk, 'John Blue')]
        self.assertEqual(filterspec.lookup_choices, expected)

    def test_relatedfieldlistfilter_manytomany(self):
        modeladmin = BookAdmin(Book, site)

        request = self.request_factory.get('/')
        request.user = self.alfred
        changelist = modeladmin.get_changelist_instance(request)

        # Make sure that all users are present in the contributors' list filter
        filterspec = changelist.get_filters(request)[0][2]
        expected = [(self.alfred.pk, 'alfred'), (self.bob.pk, 'bob'), (self.lisa.pk, 'lisa')]
        self.assertEqual(sorted(filterspec.lookup_choices), sorted(expected))

        request = self.request_factory.get('/', {'contributors__isnull': 'True'})
        request.user = self.alfred
        changelist = modeladmin.get_changelist_instance(request)

        # Make sure the correct queryset is returned
        queryset = changelist.get_queryset(request)
        self.assertEqual(list(queryset), [self.django_book, self.bio_book, self.djangonaut_book])

        # Make sure the last choice is None and is selected
        filterspec = changelist.get_filters(request)[0][2]
        self.assertEqual(filterspec.title, 'Verbose Contributors')
        choices = list(filterspec.choices(changelist))
        self.assertIs(choices[-1]['selected'], True)
        self.assertEqual(choices[-1]['query_string'], '?contributors__isnull=True')

        request = self.request_factory.get('/', {'contributors__id__exact': self.bob.pk})
        request.user = self.alfred
        changelist = modeladmin.get_changelist_instance(request)

        # Make sure the correct choice is selected
        filterspec = changelist.get_filters(request)[0][2]
        self.assertEqual(filterspec.title, 'Verbose Contributors')
        choice = select_by(filterspec.choices(changelist), "display", "bob")
        self.assertIs(choice['selected'], True)
        self.assertEqual(choice['query_string'], '?contributors__id__exact=%d' % self.bob.pk)

    def test_relatedfieldlistfilter_reverse_relationships(self):
        modeladmin = CustomUserAdmin(User, site)

        # FK relationship -----
        request = self.request_factory.get('/', {'books_authored__isnull': 'True'})
        request.user = self.alfred
        changelist = modeladmin.get_changelist_instance(request)

        # Make sure the correct queryset is returned
        queryset = changelist.get_queryset(request)
        self.assertEqual(list(queryset), [self.lisa])

        # Make sure the last choice is None and is selected
        filterspec = changelist.get_filters(request)[0][0]
        self.assertEqual(filterspec.title, 'book')
        choices = list(filterspec.choices(changelist))
        self.assertIs(choices[-1]['selected'], True)
        self.assertEqual(choices[-1]['query_string'], '?books_authored__isnull=True')

        request = self.request_factory.get('/', {'books_authored__id__exact': self.bio_book.pk})
        request.user = self.alfred
        changelist = modeladmin.get_changelist_instance(request)

        # Make sure the correct choice is selected
        filterspec = changelist.get_filters(request)[0][0]
        self.assertEqual(filterspec.title, 'book')
        choice = select_by(filterspec.choices(changelist), "display", self.bio_book.title)
        self.assertIs(choice['selected'], True)
        self.assertEqual(choice['query_string'], '?books_authored__id__exact=%d' % self.bio_book.pk)

        # M2M relationship -----
        request = self.request_factory.get('/', {'books_contributed__isnull': 'True'})
        request.user = self.alfred
        changelist = modeladmin.get_changelist_instance(request)

        # Make sure the correct queryset is returned
        queryset = changelist.get_queryset(request)
        self.assertEqual(list(queryset), [self.alfred])

        # Make sure the last choice is None and is selected
        filterspec = changelist.get_filters(request)[0][1]
        self.assertEqual(filterspec.title, 'book')
        choices = list(filterspec.choices(changelist))
        self.assertIs(choices[-1]['selected'], True)
        self.assertEqual(choices[-1]['query_string'], '?books_contributed__isnull=True')

        request = self.request_factory.get('/', {'books_contributed__id__exact': self.django_book.pk})
        request.user = self.alfred
        changelist = modeladmin.get_changelist_instance(request)

        # Make sure the correct choice is selected
        filterspec = changelist.get_filters(request)[0][1]
        self.assertEqual(filterspec.title, 'book')
        choice = select_by(filterspec.choices(changelist), "display", self.django_book.title)
        self.assertIs(choice['selected'], True)
        self.assertEqual(choice['query_string'], '?books_contributed__id__exact=%d' % self.django_book.pk)

        # With one book, the list filter should appear because there is also a
        # (None) option.
        Book.objects.exclude(pk=self.djangonaut_book.pk).delete()
        filterspec = changelist.get_filters(request)[0]
        self.assertEqual(len(filterspec), 2)
        # With no books remaining, no list filters should appear.
        Book.objects.all().delete()
        filterspec = changelist.get_filters(request)[0]
        self.assertEqual(len(filterspec), 0)

    def test_relatedfieldlistfilter_reverse_relationships_default_ordering(self):
        self.addCleanup(setattr, Book._meta, 'ordering', Book._meta.ordering)
        Book._meta.ordering = ('title',)
        modeladmin = CustomUserAdmin(User, site)

        request = self.request_factory.get('/')
        request.user = self.alfred
        changelist = modeladmin.get_changelist_instance(request)
        filterspec = changelist.get_filters(request)[0][0]
        expected = [
            (self.bio_book.pk, 'Django: a biography'),
            (self.djangonaut_book.pk, 'Djangonaut: an art of living'),
            (self.guitar_book.pk, 'Guitar for dummies'),
            (self.django_book.pk, 'The Django Book')
        ]
        self.assertEqual(filterspec.lookup_choices, expected)

    def test_relatedonlyfieldlistfilter_foreignkey(self):
        modeladmin = BookAdminRelatedOnlyFilter(Book, site)

        request = self.request_factory.get('/')
        request.user = self.alfred
        changelist = modeladmin.get_changelist_instance(request)

        # Make sure that only actual authors are present in the author's list filter
        filterspec = changelist.get_filters(request)[0][4]
        expected = [(self.alfred.pk, 'alfred'), (self.bob.pk, 'bob')]
        self.assertEqual(sorted(filterspec.lookup_choices), sorted(expected))

    def test_relatedonlyfieldlistfilter_foreignkey_reverse_relationships(self):
        class EmployeeAdminReverseRelationship(ModelAdmin):
            list_filter = (
                ('book', RelatedOnlyFieldListFilter),
            )

        self.djangonaut_book.employee = self.john
        self.djangonaut_book.save()
        self.django_book.employee = self.jack
        self.django_book.save()

        modeladmin = EmployeeAdminReverseRelationship(Employee, site)
        request = self.request_factory.get('/')
        request.user = self.alfred
        changelist = modeladmin.get_changelist_instance(request)
        filterspec = changelist.get_filters(request)[0][0]
        self.assertEqual(filterspec.lookup_choices, [
            (self.djangonaut_book.pk, 'Djangonaut: an art of living'),
            (self.django_book.pk, 'The Django Book'),
        ])

    def test_relatedonlyfieldlistfilter_manytomany_reverse_relationships(self):
        class UserAdminReverseRelationship(ModelAdmin):
            list_filter = (
                ('books_contributed', RelatedOnlyFieldListFilter),
            )

        modeladmin = UserAdminReverseRelationship(User, site)
        request = self.request_factory.get('/')
        request.user = self.alfred
        changelist = modeladmin.get_changelist_instance(request)
        filterspec = changelist.get_filters(request)[0][0]
        self.assertEqual(
            filterspec.lookup_choices,
            [(self.guitar_book.pk, 'Guitar for dummies')],
        )

    def test_relatedonlyfieldlistfilter_foreignkey_ordering(self):
        """RelatedOnlyFieldListFilter ordering respects ModelAdmin.ordering."""
        class EmployeeAdminWithOrdering(ModelAdmin):
            ordering = ('name',)

        class BookAdmin(ModelAdmin):
            list_filter = (
                ('employee', RelatedOnlyFieldListFilter),
            )

        albert = Employee.objects.create(name='Albert Green', department=self.dev)
        self.djangonaut_book.employee = albert
        self.djangonaut_book.save()
        self.bio_book.employee = self.jack
        self.bio_book.save()

        site.register(Employee, EmployeeAdminWithOrdering)
        self.addCleanup(lambda: site.unregister(Employee))
        modeladmin = BookAdmin(Book, site)

        request = self.request_factory.get('/')
        request.user = self.alfred
        changelist = modeladmin.get_changelist_instance(request)
        filterspec = changelist.get_filters(request)[0][0]
        expected = [(albert.pk, 'Albert Green'), (self.jack.pk, 'Jack Red')]
        self.assertEqual(filterspec.lookup_choices, expected)

    def test_relatedonlyfieldlistfilter_foreignkey_default_ordering(self):
        """RelatedOnlyFieldListFilter ordering respects Meta.ordering."""
        class BookAdmin(ModelAdmin):
            list_filter = (
                ('employee', RelatedOnlyFieldListFilter),
            )

        albert = Employee.objects.create(name='Albert Green', department=self.dev)
        self.djangonaut_book.employee = albert
        self.djangonaut_book.save()
        self.bio_book.employee = self.jack
        self.bio_book.save()

        self.addCleanup(setattr, Employee._meta, 'ordering', Employee._meta.ordering)
        Employee._meta.ordering = ('name',)
        modeladmin = BookAdmin(Book, site)

        request = self.request_factory.get('/')
        request.user = self.alfred
        changelist = modeladmin.get_changelist_instance(request)
        filterspec = changelist.get_filters(request)[0][0]
        expected = [(albert.pk, 'Albert Green'), (self.jack.pk, 'Jack Red')]
        self.assertEqual(filterspec.lookup_choices, expected)

    def test_relatedonlyfieldlistfilter_underscorelookup_foreignkey(self):
        Department.objects.create(code='TEST', description='Testing')
        self.djangonaut_book.employee = self.john
        self.djangonaut_book.save()
        self.bio_book.employee = self.jack
        self.bio_book.save()

        modeladmin = BookAdminRelatedOnlyFilter(Book, site)
        request = self.request_factory.get('/')
        request.user = self.alfred
        changelist = modeladmin.get_changelist_instance(request)

        # Only actual departments should be present in employee__department's
        # list filter.
        filterspec = changelist.get_filters(request)[0][6]
        expected = [
            (self.dev.code, str(self.dev)),
            (self.design.code, str(self.design)),
        ]
        self.assertEqual(sorted(filterspec.lookup_choices), sorted(expected))

    def test_relatedonlyfieldlistfilter_manytomany(self):
        modeladmin = BookAdminRelatedOnlyFilter(Book, site)

        request = self.request_factory.get('/')
        request.user = self.alfred
        changelist = modeladmin.get_changelist_instance(request)

        # Make sure that only actual contributors are present in the
        # contributors' list filter
        filterspec = changelist.get_filters(request)[0][5]
        expected = [(self.bob.pk, 'bob'), (self.lisa.pk, 'lisa')]
        self.assertEqual(sorted(filterspec.lookup_choices), sorted(expected))

    def test_listfilter_genericrelation(self):
        django_bookmark = Bookmark.objects.create(url='https://www.djangoproject.com/')
        python_bookmark = Bookmark.objects.create(url='https://www.python.org/')
        kernel_bookmark = Bookmark.objects.create(url='https://www.kernel.org/')

        TaggedItem.objects.create(content_object=django_bookmark, tag='python')
        TaggedItem.objects.create(content_object=python_bookmark, tag='python')
        TaggedItem.objects.create(content_object=kernel_bookmark, tag='linux')

        modeladmin = BookmarkAdminGenericRelation(Bookmark, site)

        request = self.request_factory.get('/', {'tags__tag': 'python'})
        request.user = self.alfred
        changelist = modeladmin.get_changelist_instance(request)
        queryset = changelist.get_queryset(request)

        expected = [python_bookmark, django_bookmark]
        self.assertEqual(list(queryset), expected)

    def test_booleanfieldlistfilter(self):
        modeladmin = BookAdmin(Book, site)
        self.verify_booleanfieldlistfilter(modeladmin)

    def test_booleanfieldlistfilter_tuple(self):
        modeladmin = BookAdminWithTupleBooleanFilter(Book, site)
        self.verify_booleanfieldlistfilter(modeladmin)

    def verify_booleanfieldlistfilter(self, modeladmin):
        request = self.request_factory.get('/')
        request.user = self.alfred
        changelist = modeladmin.get_changelist_instance(request)

        request = self.request_factory.get('/', {'is_best_seller__exact': 0})
        request.user = self.alfred
        changelist = modeladmin.get_changelist_instance(request)

        # Make sure the correct queryset is returned
        queryset = changelist.get_queryset(request)
        self.assertEqual(list(queryset), [self.bio_book])

        # Make sure the correct choice is selected
        filterspec = changelist.get_filters(request)[0][3]
        self.assertEqual(filterspec.title, 'is best seller')
        choice = select_by(filterspec.choices(changelist), "display", "No")
        self.assertIs(choice['selected'], True)
        self.assertEqual(choice['query_string'], '?is_best_seller__exact=0')

        request = self.request_factory.get('/', {'is_best_seller__exact': 1})
        request.user = self.alfred
        changelist = modeladmin.get_changelist_instance(request)

        # Make sure the correct queryset is returned
        queryset = changelist.get_queryset(request)
        self.assertEqual(list(queryset), [self.guitar_book, self.djangonaut_book])

        # Make sure the correct choice is selected
        filterspec = changelist.get_filters(request)[0][3]
        self.assertEqual(filterspec.title, 'is best seller')
        choice = select_by(filterspec.choices(changelist), "display", "Yes")
        self.assertIs(choice['selected'], True)
        self.assertEqual(choice['query_string'], '?is_best_seller__exact=1')

        request = self.request_factory.get('/', {'is_best_seller__isnull': 'True'})
        request.user = self.alfred
        changelist = modeladmin.get_changelist_instance(request)

        # Make sure the correct queryset is returned
        queryset = changelist.get_queryset(request)
        self.assertEqual(list(queryset), [self.django_book])

        # Make sure the correct choice is selected
        filterspec = changelist.get_filters(request)[0][3]
        self.assertEqual(filterspec.title, 'is best seller')
        choice = select_by(filterspec.choices(changelist), "display", "Unknown")
        self.assertIs(choice['selected'], True)
        self.assertEqual(choice['query_string'], '?is_best_seller__isnull=True')

    def test_booleanfieldlistfilter_nullbooleanfield(self):
        modeladmin = BookAdmin2(Book, site)

        request = self.request_factory.get('/')
        request.user = self.alfred
        changelist = modeladmin.get_changelist_instance(request)

        request = self.request_factory.get('/', {'is_best_seller2__exact': 0})
        request.user = self.alfred
        changelist = modeladmin.get_changelist_instance(request)

        # Make sure the correct queryset is returned
        queryset = changelist.get_queryset(request)
        self.assertEqual(list(queryset), [self.bio_book])

        # Make sure the correct choice is selected
        filterspec = changelist.get_filters(request)[0][3]
        self.assertEqual(filterspec.title, 'is best seller2')
        choice = select_by(filterspec.choices(changelist), "display", "No")
        self.assertIs(choice['selected'], True)
        self.assertEqual(choice['query_string'], '?is_best_seller2__exact=0')

        request = self.request_factory.get('/', {'is_best_seller2__exact': 1})
        request.user = self.alfred
        changelist = modeladmin.get_changelist_instance(request)

        # Make sure the correct queryset is returned
        queryset = changelist.get_queryset(request)
        self.assertEqual(list(queryset), [self.guitar_book, self.djangonaut_book])

        # Make sure the correct choice is selected
        filterspec = changelist.get_filters(request)[0][3]
        self.assertEqual(filterspec.title, 'is best seller2')
        choice = select_by(filterspec.choices(changelist), "display", "Yes")
        self.assertIs(choice['selected'], True)
        self.assertEqual(choice['query_string'], '?is_best_seller2__exact=1')

        request = self.request_factory.get('/', {'is_best_seller2__isnull': 'True'})
        request.user = self.alfred
        changelist = modeladmin.get_changelist_instance(request)

        # Make sure the correct queryset is returned
        queryset = changelist.get_queryset(request)
        self.assertEqual(list(queryset), [self.django_book])

        # Make sure the correct choice is selected
        filterspec = changelist.get_filters(request)[0][3]
        self.assertEqual(filterspec.title, 'is best seller2')
        choice = select_by(filterspec.choices(changelist), "display", "Unknown")
        self.assertIs(choice['selected'], True)
        self.assertEqual(choice['query_string'], '?is_best_seller2__isnull=True')

    def test_fieldlistfilter_underscorelookup_tuple(self):
        """
        Ensure ('fieldpath', ClassName) lookups pass lookup_allowed checks
        when fieldpath contains a double underscore in its value (#19182).
        """
        modeladmin = BookAdminWithUnderscoreLookupAndTuple(Book, site)
        request = self.request_factory.get('/')
        request.user = self.alfred
        changelist = modeladmin.get_changelist_instance(request)

        request = self.request_factory.get('/', {'author__email': '[email protected]'})
        request.user = self.alfred
        changelist = modeladmin.get_changelist_instance(request)

        # Make sure the correct queryset is returned
        queryset = changelist.get_queryset(request)
        self.assertEqual(list(queryset), [self.bio_book, self.djangonaut_book])

    def test_fieldlistfilter_invalid_lookup_parameters(self):
        """Filtering by an invalid value."""
        modeladmin = BookAdmin(Book, site)
        request = self.request_factory.get('/', {'author__id__exact': 'StringNotInteger!'})
        request.user = self.alfred
        with self.assertRaises(IncorrectLookupParameters):
            modeladmin.get_changelist_instance(request)

    def test_simplelistfilter(self):
        modeladmin = DecadeFilterBookAdmin(Book, site)

        # Make sure that the first option is 'All' ---------------------------
        request = self.request_factory.get('/', {})
        request.user = self.alfred
        changelist = modeladmin.get_changelist_instance(request)

        # Make sure the correct queryset is returned
        queryset = changelist.get_queryset(request)
        self.assertEqual(list(queryset), list(Book.objects.all().order_by('-id')))

        # Make sure the correct choice is selected
        filterspec = changelist.get_filters(request)[0][1]
        self.assertEqual(filterspec.title, 'publication decade')
        choices = list(filterspec.choices(changelist))
        self.assertEqual(choices[0]['display'], 'All')
        self.assertIs(choices[0]['selected'], True)
        self.assertEqual(choices[0]['query_string'], '?')

        # Look for books in the 1980s ----------------------------------------
        request = self.request_factory.get('/', {'publication-decade': 'the 80s'})
        request.user = self.alfred
        changelist = modeladmin.get_changelist_instance(request)

        # Make sure the correct queryset is returned
        queryset = changelist.get_queryset(request)
        self.assertEqual(list(queryset), [])

        # Make sure the correct choice is selected
        filterspec = changelist.get_filters(request)[0][1]
        self.assertEqual(filterspec.title, 'publication decade')
        choices = list(filterspec.choices(changelist))
        self.assertEqual(choices[1]['display'], 'the 1980\'s')
        self.assertIs(choices[1]['selected'], True)
        self.assertEqual(choices[1]['query_string'], '?publication-decade=the+80s')

        # Look for books in the 1990s ----------------------------------------
        request = self.request_factory.get('/', {'publication-decade': 'the 90s'})
        request.user = self.alfred
        changelist = modeladmin.get_changelist_instance(request)

        # Make sure the correct queryset is returned
        queryset = changelist.get_queryset(request)
        self.assertEqual(list(queryset), [self.bio_book])

        # Make sure the correct choice is selected
        filterspec = changelist.get_filters(request)[0][1]
        self.assertEqual(filterspec.title, 'publication decade')
        choices = list(filterspec.choices(changelist))
        self.assertEqual(choices[2]['display'], 'the 1990\'s')
        self.assertIs(choices[2]['selected'], True)
        self.assertEqual(choices[2]['query_string'], '?publication-decade=the+90s')

        # Look for books in the 2000s ----------------------------------------
        request = self.request_factory.get('/', {'publication-decade': 'the 00s'})
        request.user = self.alfred
        changelist = modeladmin.get_changelist_instance(request)

        # Make sure the correct queryset is returned
        queryset = changelist.get_queryset(request)
        self.assertEqual(list(queryset), [self.guitar_book, self.djangonaut_book])

        # Make sure the correct choice is selected
        filterspec = changelist.get_filters(request)[0][1]
        self.assertEqual(filterspec.title, 'publication decade')
        choices = list(filterspec.choices(changelist))
        self.assertEqual(choices[3]['display'], 'the 2000\'s')
        self.assertIs(choices[3]['selected'], True)
        self.assertEqual(choices[3]['query_string'], '?publication-decade=the+00s')

        # Combine multiple filters -------------------------------------------
        request = self.request_factory.get('/', {'publication-decade': 'the 00s', 'author__id__exact': self.alfred.pk})
        request.user = self.alfred
        changelist = modeladmin.get_changelist_instance(request)

        # Make sure the correct queryset is returned
        queryset = changelist.get_queryset(request)
        self.assertEqual(list(queryset), [self.djangonaut_book])

        # Make sure the correct choices are selected
        filterspec = changelist.get_filters(request)[0][1]
        self.assertEqual(filterspec.title, 'publication decade')
        choices = list(filterspec.choices(changelist))
        self.assertEqual(choices[3]['display'], 'the 2000\'s')
        self.assertIs(choices[3]['selected'], True)
        self.assertEqual(
            choices[3]['query_string'],
            '?author__id__exact=%s&publication-decade=the+00s' % self.alfred.pk
        )

        filterspec = changelist.get_filters(request)[0][0]
        self.assertEqual(filterspec.title, 'Verbose Author')
        choice = select_by(filterspec.choices(changelist), "display", "alfred")
        self.assertIs(choice['selected'], True)
        self.assertEqual(choice['query_string'], '?author__id__exact=%s&publication-decade=the+00s' % self.alfred.pk)

    def test_listfilter_without_title(self):
        """
        Any filter must define a title.
        """
        modeladmin = DecadeFilterBookAdminWithoutTitle(Book, site)
        request = self.request_factory.get('/', {})
        request.user = self.alfred
        msg = "The list filter 'DecadeListFilterWithoutTitle' does not specify a 'title'."
        with self.assertRaisesMessage(ImproperlyConfigured, msg):
            modeladmin.get_changelist_instance(request)

    def test_simplelistfilter_without_parameter(self):
        """
        Any SimpleListFilter must define a parameter_name.
        """
        modeladmin = DecadeFilterBookAdminWithoutParameter(Book, site)
        request = self.request_factory.get('/', {})
        request.user = self.alfred
        msg = "The list filter 'DecadeListFilterWithoutParameter' does not specify a 'parameter_name'."
        with self.assertRaisesMessage(ImproperlyConfigured, msg):
            modeladmin.get_changelist_instance(request)

    def test_simplelistfilter_with_none_returning_lookups(self):
        """
        A SimpleListFilter's lookups() method may return None, which disables
        the filter completely.
        """
        modeladmin = DecadeFilterBookAdminWithNoneReturningLookups(Book, site)
        request = self.request_factory.get('/', {})
        request.user = self.alfred
        changelist = modeladmin.get_changelist_instance(request)
        filterspec = changelist.get_filters(request)[0]
        self.assertEqual(len(filterspec), 0)

    def test_filter_with_failing_queryset(self):
        """
        When a filter's queryset method fails, it fails loudly and the
        corresponding exception doesn't get swallowed (#17828).
        """
        modeladmin = DecadeFilterBookAdminWithFailingQueryset(Book, site)
        request = self.request_factory.get('/', {})
        request.user = self.alfred
        with self.assertRaises(ZeroDivisionError):
            modeladmin.get_changelist_instance(request)

    def test_simplelistfilter_with_queryset_based_lookups(self):
        modeladmin = DecadeFilterBookAdminWithQuerysetBasedLookups(Book, site)
        request = self.request_factory.get('/', {})
        request.user = self.alfred
        changelist = modeladmin.get_changelist_instance(request)
        filterspec = changelist.get_filters(request)[0][0]
        self.assertEqual(filterspec.title, 'publication decade')
        choices = list(filterspec.choices(changelist))
        self.assertEqual(len(choices), 3)

        self.assertEqual(choices[0]['display'], 'All')
        self.assertIs(choices[0]['selected'], True)
        self.assertEqual(choices[0]['query_string'], '?')

        self.assertEqual(choices[1]['display'], 'the 1990\'s')
        self.assertIs(choices[1]['selected'], False)
        self.assertEqual(choices[1]['query_string'], '?publication-decade=the+90s')

        self.assertEqual(choices[2]['display'], 'the 2000\'s')
        self.assertIs(choices[2]['selected'], False)
        self.assertEqual(choices[2]['query_string'], '?publication-decade=the+00s')

    def test_two_characters_long_field(self):
        """
        list_filter works with field names that are two characters long
        (#16080).
        """
        modeladmin = BookAdmin(Book, site)
        request = self.request_factory.get('/', {'no': '207'})
        request.user = self.alfred
        changelist = modeladmin.get_changelist_instance(request)

        # Make sure the correct queryset is returned
        queryset = changelist.get_queryset(request)
        self.assertEqual(list(queryset), [self.bio_book])

        filterspec = changelist.get_filters(request)[0][-1]
        self.assertEqual(filterspec.title, 'number')
        choices = list(filterspec.choices(changelist))
        self.assertIs(choices[2]['selected'], True)
        self.assertEqual(choices[2]['query_string'], '?no=207')

    def test_parameter_ends_with__in__or__isnull(self):
        """
        A SimpleListFilter's parameter name is not mistaken for a model field
        if it ends with '__isnull' or '__in' (#17091).
        """
        # When it ends with '__in' -------------------------------------------
        modeladmin = DecadeFilterBookAdminParameterEndsWith__In(Book, site)
        request = self.request_factory.get('/', {'decade__in': 'the 90s'})
        request.user = self.alfred
        changelist = modeladmin.get_changelist_instance(request)

        # Make sure the correct queryset is returned
        queryset = changelist.get_queryset(request)
        self.assertEqual(list(queryset), [self.bio_book])

        # Make sure the correct choice is selected
        filterspec = changelist.get_filters(request)[0][0]
        self.assertEqual(filterspec.title, 'publication decade')
        choices = list(filterspec.choices(changelist))
        self.assertEqual(choices[2]['display'], 'the 1990\'s')
        self.assertIs(choices[2]['selected'], True)
        self.assertEqual(choices[2]['query_string'], '?decade__in=the+90s')

        # When it ends with '__isnull' ---------------------------------------
        modeladmin = DecadeFilterBookAdminParameterEndsWith__Isnull(Book, site)
        request = self.request_factory.get('/', {'decade__isnull': 'the 90s'})
        request.user = self.alfred
        changelist = modeladmin.get_changelist_instance(request)

        # Make sure the correct queryset is returned
        queryset = changelist.get_queryset(request)
        self.assertEqual(list(queryset), [self.bio_book])

        # Make sure the correct choice is selected
        filterspec = changelist.get_filters(request)[0][0]
        self.assertEqual(filterspec.title, 'publication decade')
        choices = list(filterspec.choices(changelist))
        self.assertEqual(choices[2]['display'], 'the 1990\'s')
        self.assertIs(choices[2]['selected'], True)
        self.assertEqual(choices[2]['query_string'], '?decade__isnull=the+90s')

    def test_lookup_with_non_string_value(self):
        """
        Ensure choices are correctly marked as selected when using non-string
        values for lookups in SimpleListFilters (#19318).
        """
        modeladmin = DepartmentFilterEmployeeAdmin(Employee, site)
        request = self.request_factory.get('/', {'department': self.john.department.pk})
        request.user = self.alfred
        changelist = modeladmin.get_changelist_instance(request)

        queryset = changelist.get_queryset(request)

        self.assertEqual(list(queryset), [self.john])

        filterspec = changelist.get_filters(request)[0][-1]
        self.assertEqual(filterspec.title, 'department')
        choices = list(filterspec.choices(changelist))

        self.assertEqual(choices[1]['display'], 'DEV')
        self.assertIs(choices[1]['selected'], True)
        self.assertEqual(choices[1]['query_string'], '?department=%s' % self.john.department.pk)

    def test_lookup_with_non_string_value_underscored(self):
        """
        Ensure SimpleListFilter lookups pass lookup_allowed checks when the
        parameter_name attribute contains a double-underscore value (#19182).
        """
        modeladmin = DepartmentFilterUnderscoredEmployeeAdmin(Employee, site)
        request = self.request_factory.get('/', {'department__whatever': self.john.department.pk})
        request.user = self.alfred
        changelist = modeladmin.get_changelist_instance(request)

        queryset = changelist.get_queryset(request)

        self.assertEqual(list(queryset), [self.john])

        filterspec = changelist.get_filters(request)[0][-1]
        self.assertEqual(filterspec.title, 'department')
        choices = list(filterspec.choices(changelist))

        self.assertEqual(choices[1]['display'], 'DEV')
        self.assertIs(choices[1]['selected'], True)
        self.assertEqual(choices[1]['query_string'], '?department__whatever=%s' % self.john.department.pk)

    def test_fk_with_to_field(self):
        """
        A filter on a FK respects the FK's to_field attribute (#17972).
        """
        modeladmin = EmployeeAdmin(Employee, site)

        request = self.request_factory.get('/', {})
        request.user = self.alfred
        changelist = modeladmin.get_changelist_instance(request)

        # Make sure the correct queryset is returned
        queryset = changelist.get_queryset(request)
        self.assertEqual(list(queryset), [self.jack, self.john])

        filterspec = changelist.get_filters(request)[0][-1]
        self.assertEqual(filterspec.title, 'department')
        choices = list(filterspec.choices(changelist))

        self.assertEqual(choices[0]['display'], 'All')
        self.assertIs(choices[0]['selected'], True)
        self.assertEqual(choices[0]['query_string'], '?')

        self.assertEqual(choices[1]['display'], 'Development')
        self.assertIs(choices[1]['selected'], False)
        self.assertEqual(choices[1]['query_string'], '?department__code__exact=DEV')

        self.assertEqual(choices[2]['display'], 'Design')
        self.assertIs(choices[2]['selected'], False)
        self.assertEqual(choices[2]['query_string'], '?department__code__exact=DSN')

        # Filter by Department=='Development' --------------------------------
        request = self.request_factory.get('/', {'department__code__exact': 'DEV'})
        request.user = self.alfred
        changelist = modeladmin.get_changelist_instance(request)

        # Make sure the correct queryset is returned
        queryset = changelist.get_queryset(request)
        self.assertEqual(list(queryset), [self.john])

        filterspec = changelist.get_filters(request)[0][-1]
        self.assertEqual(filterspec.title, 'department')
        choices = list(filterspec.choices(changelist))

        self.assertEqual(choices[0]['display'], 'All')
        self.assertIs(choices[0]['selected'], False)
        self.assertEqual(choices[0]['query_string'], '?')

        self.assertEqual(choices[1]['display'], 'Development')
        self.assertIs(choices[1]['selected'], True)
        self.assertEqual(choices[1]['query_string'], '?department__code__exact=DEV')

        self.assertEqual(choices[2]['display'], 'Design')
        self.assertIs(choices[2]['selected'], False)
        self.assertEqual(choices[2]['query_string'], '?department__code__exact=DSN')

    def test_lookup_with_dynamic_value(self):
        """
        Ensure SimpleListFilter can access self.value() inside the lookup.
        """
        modeladmin = DepartmentFilterDynamicValueBookAdmin(Book, site)

        def _test_choices(request, expected_displays):
            request.user = self.alfred
            changelist = modeladmin.get_changelist_instance(request)
            filterspec = changelist.get_filters(request)[0][0]
            self.assertEqual(filterspec.title, 'publication decade')
            choices = tuple(c['display'] for c in filterspec.choices(changelist))
            self.assertEqual(choices, expected_displays)

        _test_choices(self.request_factory.get('/', {}),
                      ("All", "the 1980's", "the 1990's"))

        _test_choices(self.request_factory.get('/', {'publication-decade': 'the 80s'}),
                      ("All", "the 1990's"))

        _test_choices(self.request_factory.get('/', {'publication-decade': 'the 90s'}),
                      ("All", "the 1980's"))

    def test_list_filter_queryset_filtered_by_default(self):
        """
        A list filter that filters the queryset by default gives the correct
        full_result_count.
        """
        modeladmin = NotNinetiesListFilterAdmin(Book, site)
        request = self.request_factory.get('/', {})
        request.user = self.alfred
        changelist = modeladmin.get_changelist_instance(request)
        changelist.get_results(request)
        self.assertEqual(changelist.full_result_count, 4)

    def test_emptylistfieldfilter(self):
        empty_description = Department.objects.create(code='EMPT', description='')
        none_description = Department.objects.create(code='NONE', description=None)
        empty_title = Book.objects.create(title='', author=self.alfred)

        department_admin = DepartmentAdminWithEmptyFieldListFilter(Department, site)
        book_admin = BookAdminWithEmptyFieldListFilter(Book, site)

        tests = [
            # Allows nulls and empty strings.
            (
                department_admin,
                {'description__isempty': '1'},
                [empty_description, none_description],
            ),
            (
                department_admin,
                {'description__isempty': '0'},
                [self.dev, self.design],
            ),
            # Allows nulls.
            (book_admin, {'author__isempty': '1'}, [self.guitar_book]),
            (
                book_admin,
                {'author__isempty': '0'},
                [self.django_book, self.bio_book, self.djangonaut_book, empty_title],
            ),
            # Allows empty strings.
            (book_admin, {'title__isempty': '1'}, [empty_title]),
            (
                book_admin,
                {'title__isempty': '0'},
                [self.django_book, self.bio_book, self.djangonaut_book, self.guitar_book],
            ),
        ]
        for modeladmin, query_string, expected_result in tests:
            with self.subTest(
                modeladmin=modeladmin.__class__.__name__,
                query_string=query_string,
            ):
                request = self.request_factory.get('/', query_string)
                request.user = self.alfred
                changelist = modeladmin.get_changelist_instance(request)
                queryset = changelist.get_queryset(request)
                self.assertCountEqual(queryset, expected_result)

    def test_emptylistfieldfilter_choices(self):
        modeladmin = BookAdminWithEmptyFieldListFilter(Book, site)
        request = self.request_factory.get('/')
        request.user = self.alfred
        changelist = modeladmin.get_changelist_instance(request)
        filterspec = changelist.get_filters(request)[0][0]
        self.assertEqual(filterspec.title, 'Verbose Author')
        choices = list(filterspec.choices(changelist))
        self.assertEqual(len(choices), 3)

        self.assertEqual(choices[0]['display'], 'All')
        self.assertIs(choices[0]['selected'], True)
        self.assertEqual(choices[0]['query_string'], '?')

        self.assertEqual(choices[1]['display'], 'Empty')
        self.assertIs(choices[1]['selected'], False)
        self.assertEqual(choices[1]['query_string'], '?author__isempty=1')

        self.assertEqual(choices[2]['display'], 'Not empty')
        self.assertIs(choices[2]['selected'], False)
        self.assertEqual(choices[2]['query_string'], '?author__isempty=0')

    def test_emptylistfieldfilter_non_empty_field(self):
        class EmployeeAdminWithEmptyFieldListFilter(ModelAdmin):
            list_filter = [('department', EmptyFieldListFilter)]

        modeladmin = EmployeeAdminWithEmptyFieldListFilter(Employee, site)
        request = self.request_factory.get('/')
        request.user = self.alfred
        msg = (
            "The list filter 'EmptyFieldListFilter' cannot be used with field "
            "'department' which doesn't allow empty strings and nulls."
        )
        with self.assertRaisesMessage(ImproperlyConfigured, msg):
            modeladmin.get_changelist_instance(request)

    def test_emptylistfieldfilter_invalid_lookup_parameters(self):
        modeladmin = BookAdminWithEmptyFieldListFilter(Book, site)
        request = self.request_factory.get('/', {'author__isempty': 42})
        request.user = self.alfred
        with self.assertRaises(IncorrectLookupParameters):
            modeladmin.get_changelist_instance(request)
import time

from django.core.exceptions import ImproperlyConfigured
from django.http import HttpResponse
from django.test import RequestFactory, SimpleTestCase, override_settings
from django.test.utils import require_jinja2
from django.urls import resolve
from django.views.generic import RedirectView, TemplateView, View

from . import views


class SimpleView(View):
    """
    A simple view with a docstring.
    """
    def get(self, request):
        return HttpResponse('This is a simple view')


class SimplePostView(SimpleView):
    post = SimpleView.get


class PostOnlyView(View):
    def post(self, request):
        return HttpResponse('This view only accepts POST')


class CustomizableView(SimpleView):
    parameter = {}


def decorator(view):
    view.is_decorated = True
    return view


class DecoratedDispatchView(SimpleView):

    @decorator
    def dispatch(self, request, *args, **kwargs):
        return super().dispatch(request, *args, **kwargs)


class AboutTemplateView(TemplateView):
    def get(self, request):
        return self.render_to_response({})

    def get_template_names(self):
        return ['generic_views/about.html']


class AboutTemplateAttributeView(TemplateView):
    template_name = 'generic_views/about.html'

    def get(self, request):
        return self.render_to_response(context={})


class InstanceView(View):
    def get(self, request):
        return self


class ViewTest(SimpleTestCase):
    rf = RequestFactory()

    def _assert_simple(self, response):
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, b'This is a simple view')

    def test_no_init_kwargs(self):
        """
        A view can't be accidentally instantiated before deployment.
        """
        msg = 'This method is available only on the class, not on instances.'
        with self.assertRaisesMessage(AttributeError, msg):
            SimpleView(key='value').as_view()

    def test_no_init_args(self):
        """
        A view can't be accidentally instantiated before deployment.
        """
        msg = 'as_view() takes 1 positional argument but 2 were given'
        with self.assertRaisesMessage(TypeError, msg):
            SimpleView.as_view('value')

    def test_pathological_http_method(self):
        """
        The edge case of an HTTP request that spoofs an existing method name
        is caught.
        """
        self.assertEqual(SimpleView.as_view()(
            self.rf.get('/', REQUEST_METHOD='DISPATCH')
        ).status_code, 405)

    def test_get_only(self):
        """
        Test that a view which only allows GET doesn't allow other methods.
        """
        self._assert_simple(SimpleView.as_view()(self.rf.get('/')))
        self.assertEqual(SimpleView.as_view()(self.rf.post('/')).status_code, 405)
        self.assertEqual(SimpleView.as_view()(
            self.rf.get('/', REQUEST_METHOD='FAKE')
        ).status_code, 405)

    def test_get_and_head(self):
        """
        Test that a view which supplies a GET method also responds correctly
        to HEAD.
        """
        self._assert_simple(SimpleView.as_view()(self.rf.get('/')))
        response = SimpleView.as_view()(self.rf.head('/'))
        self.assertEqual(response.status_code, 200)

    def test_setup_get_and_head(self):
        view_instance = SimpleView()
        self.assertFalse(hasattr(view_instance, 'head'))
        view_instance.setup(self.rf.get('/'))
        self.assertTrue(hasattr(view_instance, 'head'))
        self.assertEqual(view_instance.head, view_instance.get)

    def test_head_no_get(self):
        """
        Test that a view which supplies no GET method responds to HEAD with
        HTTP 405.
        """
        response = PostOnlyView.as_view()(self.rf.head('/'))
        self.assertEqual(response.status_code, 405)

    def test_get_and_post(self):
        """
        Test a view which allows both GET and POST.
        """
        self._assert_simple(SimplePostView.as_view()(self.rf.get('/')))
        self._assert_simple(SimplePostView.as_view()(self.rf.post('/')))
        self.assertEqual(SimplePostView.as_view()(
            self.rf.get('/', REQUEST_METHOD='FAKE')
        ).status_code, 405)

    def test_invalid_keyword_argument(self):
        """
        View arguments must be predefined on the class and can't be named
        like an HTTP method.
        """
        msg = (
            'The method name %s is not accepted as a keyword argument to '
            'SimpleView().'
        )
        # Check each of the allowed method names
        for method in SimpleView.http_method_names:
            with self.assertRaisesMessage(TypeError, msg % method):
                SimpleView.as_view(**{method: 'value'})

        # Check the case view argument is ok if predefined on the class...
        CustomizableView.as_view(parameter="value")

        # ...but raises errors otherwise.
        msg = (
            "CustomizableView() received an invalid keyword 'foobar'. "
            "as_view only accepts arguments that are already attributes of "
            "the class."
        )
        with self.assertRaisesMessage(TypeError, msg):
            CustomizableView.as_view(foobar="value")

    def test_calling_more_than_once(self):
        """
        Test a view can only be called once.
        """
        request = self.rf.get('/')
        view = InstanceView.as_view()
        self.assertNotEqual(view(request), view(request))

    def test_class_attributes(self):
        """
        The callable returned from as_view() has proper docstring, name and
        module.
        """
        self.assertEqual(SimpleView.__doc__, SimpleView.as_view().__doc__)
        self.assertEqual(SimpleView.__name__, SimpleView.as_view().__name__)
        self.assertEqual(SimpleView.__module__, SimpleView.as_view().__module__)

    def test_dispatch_decoration(self):
        """
        Attributes set by decorators on the dispatch method are also present
        on the closure.
        """
        self.assertTrue(DecoratedDispatchView.as_view().is_decorated)

    def test_options(self):
        """
        Views respond to HTTP OPTIONS requests with an Allow header
        appropriate for the methods implemented by the view class.
        """
        request = self.rf.options('/')
        view = SimpleView.as_view()
        response = view(request)
        self.assertEqual(200, response.status_code)
        self.assertTrue(response['Allow'])

    def test_options_for_get_view(self):
        """
        A view implementing GET allows GET and HEAD.
        """
        request = self.rf.options('/')
        view = SimpleView.as_view()
        response = view(request)
        self._assert_allows(response, 'GET', 'HEAD')

    def test_options_for_get_and_post_view(self):
        """
        A view implementing GET and POST allows GET, HEAD, and POST.
        """
        request = self.rf.options('/')
        view = SimplePostView.as_view()
        response = view(request)
        self._assert_allows(response, 'GET', 'HEAD', 'POST')

    def test_options_for_post_view(self):
        """
        A view implementing POST allows POST.
        """
        request = self.rf.options('/')
        view = PostOnlyView.as_view()
        response = view(request)
        self._assert_allows(response, 'POST')

    def _assert_allows(self, response, *expected_methods):
        "Assert allowed HTTP methods reported in the Allow response header"
        response_allows = set(response['Allow'].split(', '))
        self.assertEqual(set(expected_methods + ('OPTIONS',)), response_allows)

    def test_args_kwargs_request_on_self(self):
        """
        Test that a view only has args, kwargs & request once `as_view`
        has been called.
        """
        bare_view = InstanceView()
        view = InstanceView.as_view()(self.rf.get('/'))
        for attribute in ('args', 'kwargs', 'request'):
            self.assertNotIn(attribute, dir(bare_view))
            self.assertIn(attribute, dir(view))

    def test_overridden_setup(self):
        class SetAttributeMixin:
            def setup(self, request, *args, **kwargs):
                self.attr = True
                super().setup(request, *args, **kwargs)

        class CheckSetupView(SetAttributeMixin, SimpleView):
            def dispatch(self, request, *args, **kwargs):
                assert hasattr(self, 'attr')
                return super().dispatch(request, *args, **kwargs)

        response = CheckSetupView.as_view()(self.rf.get('/'))
        self.assertEqual(response.status_code, 200)

    def test_not_calling_parent_setup_error(self):
        class TestView(View):
            def setup(self, request, *args, **kwargs):
                pass  # Not calling super().setup()

        msg = (
            "TestView instance has no 'request' attribute. Did you override "
            "setup() and forget to call super()?"
        )
        with self.assertRaisesMessage(AttributeError, msg):
            TestView.as_view()(self.rf.get('/'))

    def test_setup_adds_args_kwargs_request(self):
        request = self.rf.get('/')
        args = ('arg 1', 'arg 2')
        kwargs = {'kwarg_1': 1, 'kwarg_2': 'year'}

        view = View()
        view.setup(request, *args, **kwargs)
        self.assertEqual(request, view.request)
        self.assertEqual(args, view.args)
        self.assertEqual(kwargs, view.kwargs)

    def test_direct_instantiation(self):
        """
        It should be possible to use the view by directly instantiating it
        without going through .as_view() (#21564).
        """
        view = PostOnlyView()
        response = view.dispatch(self.rf.head('/'))
        self.assertEqual(response.status_code, 405)


@override_settings(ROOT_URLCONF='generic_views.urls')
class TemplateViewTest(SimpleTestCase):
    rf = RequestFactory()

    def _assert_about(self, response):
        response.render()
        self.assertContains(response, '<h1>About</h1>')

    def test_get(self):
        """
        Test a view that simply renders a template on GET.
        """
        self._assert_about(AboutTemplateView.as_view()(self.rf.get('/about/')))

    def test_head(self):
        """
        Test a TemplateView responds correctly to HEAD.
        """
        response = AboutTemplateView.as_view()(self.rf.head('/about/'))
        self.assertEqual(response.status_code, 200)

    def test_get_template_attribute(self):
        """
        Test a view that renders a template on GET with the template name as
        an attribute on the class.
        """
        self._assert_about(AboutTemplateAttributeView.as_view()(self.rf.get('/about/')))

    def test_get_generic_template(self):
        """
        Test a completely generic view that renders a template on GET
        with the template name as an argument at instantiation.
        """
        self._assert_about(TemplateView.as_view(template_name='generic_views/about.html')(self.rf.get('/about/')))

    def test_template_name_required(self):
        """
        A template view must provide a template name.
        """
        msg = (
            "TemplateResponseMixin requires either a definition of "
            "'template_name' or an implementation of 'get_template_names()'"
        )
        with self.assertRaisesMessage(ImproperlyConfigured, msg):
            self.client.get('/template/no_template/')

    @require_jinja2
    def test_template_engine(self):
        """
        A template view may provide a template engine.
        """
        request = self.rf.get('/using/')
        view = TemplateView.as_view(template_name='generic_views/using.html')
        self.assertEqual(view(request).render().content, b'DTL\n')
        view = TemplateView.as_view(template_name='generic_views/using.html', template_engine='django')
        self.assertEqual(view(request).render().content, b'DTL\n')
        view = TemplateView.as_view(template_name='generic_views/using.html', template_engine='jinja2')
        self.assertEqual(view(request).render().content, b'Jinja2\n')

    def test_template_params(self):
        """
        A generic template view passes kwargs as context.
        """
        response = self.client.get('/template/simple/bar/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['foo'], 'bar')
        self.assertIsInstance(response.context['view'], View)

    def test_extra_template_params(self):
        """
        A template view can be customized to return extra context.
        """
        response = self.client.get('/template/custom/bar/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['foo'], 'bar')
        self.assertEqual(response.context['key'], 'value')
        self.assertIsInstance(response.context['view'], View)

    def test_cached_views(self):
        """
        A template view can be cached.
        """
        response = self.client.get('/template/cached/bar/')
        self.assertEqual(response.status_code, 200)

        time.sleep(1.0)

        response2 = self.client.get('/template/cached/bar/')
        self.assertEqual(response2.status_code, 200)

        self.assertEqual(response.content, response2.content)

        time.sleep(2.0)

        # Let the cache expire and test again
        response2 = self.client.get('/template/cached/bar/')
        self.assertEqual(response2.status_code, 200)

        self.assertNotEqual(response.content, response2.content)

    def test_content_type(self):
        response = self.client.get('/template/content_type/')
        self.assertEqual(response['Content-Type'], 'text/plain')

    def test_resolve_view(self):
        match = resolve('/template/content_type/')
        self.assertIs(match.func.view_class, TemplateView)
        self.assertEqual(match.func.view_initkwargs['content_type'], 'text/plain')

    def test_resolve_login_required_view(self):
        match = resolve('/template/login_required/')
        self.assertIs(match.func.view_class, TemplateView)

    def test_extra_context(self):
        response = self.client.get('/template/extra_context/')
        self.assertEqual(response.context['title'], 'Title')


@override_settings(ROOT_URLCONF='generic_views.urls')
class RedirectViewTest(SimpleTestCase):
    rf = RequestFactory()

    def test_no_url(self):
        "Without any configuration, returns HTTP 410 GONE"
        response = RedirectView.as_view()(self.rf.get('/foo/'))
        self.assertEqual(response.status_code, 410)

    def test_default_redirect(self):
        "Default is a temporary redirect"
        response = RedirectView.as_view(url='/bar/')(self.rf.get('/foo/'))
        self.assertEqual(response.status_code, 302)
        self.assertEqual(response.url, '/bar/')

    def test_permanent_redirect(self):
        "Permanent redirects are an option"
        response = RedirectView.as_view(url='/bar/', permanent=True)(self.rf.get('/foo/'))
        self.assertEqual(response.status_code, 301)
        self.assertEqual(response.url, '/bar/')

    def test_temporary_redirect(self):
        "Temporary redirects are an option"
        response = RedirectView.as_view(url='/bar/', permanent=False)(self.rf.get('/foo/'))
        self.assertEqual(response.status_code, 302)
        self.assertEqual(response.url, '/bar/')

    def test_include_args(self):
        "GET arguments can be included in the redirected URL"
        response = RedirectView.as_view(url='/bar/')(self.rf.get('/foo/'))
        self.assertEqual(response.status_code, 302)
        self.assertEqual(response.url, '/bar/')

        response = RedirectView.as_view(url='/bar/', query_string=True)(self.rf.get('/foo/?pork=spam'))
        self.assertEqual(response.status_code, 302)
        self.assertEqual(response.url, '/bar/?pork=spam')

    def test_include_urlencoded_args(self):
        "GET arguments can be URL-encoded when included in the redirected URL"
        response = RedirectView.as_view(url='/bar/', query_string=True)(
            self.rf.get('/foo/?unicode=%E2%9C%93'))
        self.assertEqual(response.status_code, 302)
        self.assertEqual(response.url, '/bar/?unicode=%E2%9C%93')

    def test_parameter_substitution(self):
        "Redirection URLs can be parameterized"
        response = RedirectView.as_view(url='/bar/%(object_id)d/')(self.rf.get('/foo/42/'), object_id=42)
        self.assertEqual(response.status_code, 302)
        self.assertEqual(response.url, '/bar/42/')

    def test_named_url_pattern(self):
        "Named pattern parameter should reverse to the matching pattern"
        response = RedirectView.as_view(pattern_name='artist_detail')(self.rf.get('/foo/'), pk=1)
        self.assertEqual(response.status_code, 302)
        self.assertEqual(response['Location'], '/detail/artist/1/')

    def test_named_url_pattern_using_args(self):
        response = RedirectView.as_view(pattern_name='artist_detail')(self.rf.get('/foo/'), 1)
        self.assertEqual(response.status_code, 302)
        self.assertEqual(response['Location'], '/detail/artist/1/')

    def test_redirect_POST(self):
        "Default is a temporary redirect"
        response = RedirectView.as_view(url='/bar/')(self.rf.post('/foo/'))
        self.assertEqual(response.status_code, 302)
        self.assertEqual(response.url, '/bar/')

    def test_redirect_HEAD(self):
        "Default is a temporary redirect"
        response = RedirectView.as_view(url='/bar/')(self.rf.head('/foo/'))
        self.assertEqual(response.status_code, 302)
        self.assertEqual(response.url, '/bar/')

    def test_redirect_OPTIONS(self):
        "Default is a temporary redirect"
        response = RedirectView.as_view(url='/bar/')(self.rf.options('/foo/'))
        self.assertEqual(response.status_code, 302)
        self.assertEqual(response.url, '/bar/')

    def test_redirect_PUT(self):
        "Default is a temporary redirect"
        response = RedirectView.as_view(url='/bar/')(self.rf.put('/foo/'))
        self.assertEqual(response.status_code, 302)
        self.assertEqual(response.url, '/bar/')

    def test_redirect_PATCH(self):
        "Default is a temporary redirect"
        response = RedirectView.as_view(url='/bar/')(self.rf.patch('/foo/'))
        self.assertEqual(response.status_code, 302)
        self.assertEqual(response.url, '/bar/')

    def test_redirect_DELETE(self):
        "Default is a temporary redirect"
        response = RedirectView.as_view(url='/bar/')(self.rf.delete('/foo/'))
        self.assertEqual(response.status_code, 302)
        self.assertEqual(response.url, '/bar/')

    def test_redirect_when_meta_contains_no_query_string(self):
        "Regression test for #16705"
        # We can't use self.rf.get because it always sets QUERY_STRING.
        response = RedirectView.as_view(url='/bar/')(self.rf.request(PATH_INFO='/foo/'))
        self.assertEqual(response.status_code, 302)

    def test_direct_instantiation(self):
        """
        It should be possible to use the view without going through
        .as_view() (#21564).
        """
        view = RedirectView()
        response = view.dispatch(self.rf.head('/foo/'))
        self.assertEqual(response.status_code, 410)


class GetContextDataTest(SimpleTestCase):

    def test_get_context_data_super(self):
        test_view = views.CustomContextView()
        context = test_view.get_context_data(kwarg_test='kwarg_value')

        # The test_name key is inserted by the test class's parent
        self.assertIn('test_name', context)
        self.assertEqual(context['kwarg_test'], 'kwarg_value')
        self.assertEqual(context['custom_key'], 'custom_value')

        # Test that the kwarg overrides values assigned higher up
        context = test_view.get_context_data(test_name='test_value')
        self.assertEqual(context['test_name'], 'test_value')

    def test_object_at_custom_name_in_context_data(self):
        # Checks 'pony' key presence in dict returned by get_context_data()
        test_view = views.CustomSingleObjectView()
        test_view.context_object_name = 'pony'
        context = test_view.get_context_data()
        self.assertEqual(context['pony'], test_view.object)

    def test_object_in_get_context_data(self):
        # Checks 'object' key presence in dict returned by get_context_data()
        # (#20234)
        test_view = views.CustomSingleObjectView()
        context = test_view.get_context_data()
        self.assertEqual(context['object'], test_view.object)


class UseMultipleObjectMixinTest(SimpleTestCase):
    rf = RequestFactory()

    def test_use_queryset_from_view(self):
        test_view = views.CustomMultipleObjectMixinView()
        test_view.get(self.rf.get('/'))
        # Don't pass queryset as argument
        context = test_view.get_context_data()
        self.assertEqual(context['object_list'], test_view.queryset)

    def test_overwrite_queryset(self):
        test_view = views.CustomMultipleObjectMixinView()
        test_view.get(self.rf.get('/'))
        queryset = [{'name': 'Lennon'}, {'name': 'Ono'}]
        self.assertNotEqual(test_view.queryset, queryset)
        # Overwrite the view's queryset with the queryset from the kwarg
        context = test_view.get_context_data(object_list=queryset)
        self.assertEqual(context['object_list'], queryset)


class SingleObjectTemplateResponseMixinTest(SimpleTestCase):

    def test_template_mixin_without_template(self):
        """
        We want to make sure that if you use a template mixin, but forget the
        template, it still tells you it's ImproperlyConfigured instead of
        TemplateDoesNotExist.
        """
        view = views.TemplateResponseWithoutTemplate()
        msg = (
            "TemplateResponseMixin requires either a definition of "
            "'template_name' or an implementation of 'get_template_names()'"
        )
        with self.assertRaisesMessage(ImproperlyConfigured, msg):
            view.get_template_names()
from django import forms from django.contrib.admin import BooleanFieldListFilter, SimpleListFilter from django.contrib.admin.options import VERTICAL, ModelAdmin, TabularInline from django.contrib.admin.sites import AdminSite from django.core.checks import Error from django.db.models import F, Field, Model from django.db.models.functions import Upper from django.forms.models import BaseModelFormSet from django.test import SimpleTestCase from .models import ( Band, Song, User, ValidationTestInlineModel, ValidationTestModel, ) class CheckTestCase(SimpleTestCase): def assertIsInvalid(self, model_admin, model, msg, id=None, hint=None, invalid_obj=None, admin_site=None): if admin_site is None: admin_site = AdminSite() invalid_obj = invalid_obj or model_admin admin_obj = model_admin(model, admin_site) self.assertEqual(admin_obj.check(), [Error(msg, hint=hint, obj=invalid_obj, id=id)]) def assertIsInvalidRegexp(self, model_admin, model, msg, id=None, hint=None, invalid_obj=None): """ Same as assertIsInvalid but treats the given msg as a regexp. """ invalid_obj = invalid_obj or model_admin admin_obj = model_admin(model, AdminSite()) errors = admin_obj.check() self.assertEqual(len(errors), 1) error = errors[0] self.assertEqual(error.hint, hint) self.assertEqual(error.obj, invalid_obj) self.assertEqual(error.id, id) self.assertRegex(error.msg, msg) def assertIsValid(self, model_admin, model, admin_site=None): if admin_site is None: admin_site = AdminSite() admin_obj = model_admin(model, admin_site) self.assertEqual(admin_obj.check(), []) class RawIdCheckTests(CheckTestCase): def test_not_iterable(self): class TestModelAdmin(ModelAdmin): raw_id_fields = 10 self.assertIsInvalid( TestModelAdmin, ValidationTestModel, "The value of 'raw_id_fields' must be a list or tuple.", 'admin.E001' ) def test_missing_field(self): class TestModelAdmin(ModelAdmin): raw_id_fields = ('non_existent_field',) self.assertIsInvalid( TestModelAdmin, ValidationTestModel, "The value of 'raw_id_fields[0]' refers to 'non_existent_field', " "which is not an attribute of 'modeladmin.ValidationTestModel'.", 'admin.E002' ) def test_invalid_field_type(self): class TestModelAdmin(ModelAdmin): raw_id_fields = ('name',) self.assertIsInvalid( TestModelAdmin, ValidationTestModel, "The value of 'raw_id_fields[0]' must be a foreign key or a " "many-to-many field.", 'admin.E003' ) def test_valid_case(self): class TestModelAdmin(ModelAdmin): raw_id_fields = ('users',) self.assertIsValid(TestModelAdmin, ValidationTestModel) class FieldsetsCheckTests(CheckTestCase): def test_valid_case(self): class TestModelAdmin(ModelAdmin): fieldsets = (('General', {'fields': ('name',)}),) self.assertIsValid(TestModelAdmin, ValidationTestModel) def test_not_iterable(self): class TestModelAdmin(ModelAdmin): fieldsets = 10 self.assertIsInvalid( TestModelAdmin, ValidationTestModel, "The value of 'fieldsets' must be a list or tuple.", 'admin.E007' ) def test_non_iterable_item(self): class TestModelAdmin(ModelAdmin): fieldsets = ({},) self.assertIsInvalid( TestModelAdmin, ValidationTestModel, "The value of 'fieldsets[0]' must be a list or tuple.", 'admin.E008' ) def test_item_not_a_pair(self): class TestModelAdmin(ModelAdmin): fieldsets = ((),) self.assertIsInvalid( TestModelAdmin, ValidationTestModel, "The value of 'fieldsets[0]' must be of length 2.", 'admin.E009' ) def test_second_element_of_item_not_a_dict(self): class TestModelAdmin(ModelAdmin): fieldsets = (('General', ()),) self.assertIsInvalid( TestModelAdmin, ValidationTestModel, "The value of 
'fieldsets[0][1]' must be a dictionary.", 'admin.E010' ) def test_missing_fields_key(self): class TestModelAdmin(ModelAdmin): fieldsets = (('General', {}),) self.assertIsInvalid( TestModelAdmin, ValidationTestModel, "The value of 'fieldsets[0][1]' must contain the key 'fields'.", 'admin.E011' ) class TestModelAdmin(ModelAdmin): fieldsets = (('General', {'fields': ('name',)}),) self.assertIsValid(TestModelAdmin, ValidationTestModel) def test_specified_both_fields_and_fieldsets(self): class TestModelAdmin(ModelAdmin): fieldsets = (('General', {'fields': ('name',)}),) fields = ['name'] self.assertIsInvalid( TestModelAdmin, ValidationTestModel, "Both 'fieldsets' and 'fields' are specified.", 'admin.E005' ) def test_duplicate_fields(self): class TestModelAdmin(ModelAdmin): fieldsets = [(None, {'fields': ['name', 'name']})] self.assertIsInvalid( TestModelAdmin, ValidationTestModel, "There are duplicate field(s) in 'fieldsets[0][1]'.", 'admin.E012' ) def test_duplicate_fields_in_fieldsets(self): class TestModelAdmin(ModelAdmin): fieldsets = [ (None, {'fields': ['name']}), (None, {'fields': ['name']}), ] self.assertIsInvalid( TestModelAdmin, ValidationTestModel, "There are duplicate field(s) in 'fieldsets[1][1]'.", 'admin.E012' ) def test_fieldsets_with_custom_form_validation(self): class BandAdmin(ModelAdmin): fieldsets = (('Band', {'fields': ('name',)}),) self.assertIsValid(BandAdmin, Band) class FieldsCheckTests(CheckTestCase): def test_duplicate_fields_in_fields(self): class TestModelAdmin(ModelAdmin): fields = ['name', 'name'] self.assertIsInvalid( TestModelAdmin, ValidationTestModel, "The value of 'fields' contains duplicate field(s).", 'admin.E006' ) def test_inline(self): class ValidationTestInline(TabularInline): model = ValidationTestInlineModel fields = 10 class TestModelAdmin(ModelAdmin): inlines = [ValidationTestInline] self.assertIsInvalid( TestModelAdmin, ValidationTestModel, "The value of 'fields' must be a list or tuple.", 'admin.E004', invalid_obj=ValidationTestInline ) class FormCheckTests(CheckTestCase): def test_invalid_type(self): class FakeForm: pass class TestModelAdmin(ModelAdmin): form = FakeForm class TestModelAdminWithNoForm(ModelAdmin): form = 'not a form' for model_admin in (TestModelAdmin, TestModelAdminWithNoForm): with self.subTest(model_admin): self.assertIsInvalid( model_admin, ValidationTestModel, "The value of 'form' must inherit from 'BaseModelForm'.", 'admin.E016' ) def test_fieldsets_with_custom_form_validation(self): class BandAdmin(ModelAdmin): fieldsets = (('Band', {'fields': ('name',)}),) self.assertIsValid(BandAdmin, Band) def test_valid_case(self): class AdminBandForm(forms.ModelForm): delete = forms.BooleanField() class BandAdmin(ModelAdmin): form = AdminBandForm fieldsets = ( ('Band', { 'fields': ('name', 'bio', 'sign_date', 'delete') }), ) self.assertIsValid(BandAdmin, Band) class FilterVerticalCheckTests(CheckTestCase): def test_not_iterable(self): class TestModelAdmin(ModelAdmin): filter_vertical = 10 self.assertIsInvalid( TestModelAdmin, ValidationTestModel, "The value of 'filter_vertical' must be a list or tuple.", 'admin.E017' ) def test_missing_field(self): class TestModelAdmin(ModelAdmin): filter_vertical = ('non_existent_field',) self.assertIsInvalid( TestModelAdmin, ValidationTestModel, "The value of 'filter_vertical[0]' refers to 'non_existent_field', " "which is not an attribute of 'modeladmin.ValidationTestModel'.", 'admin.E019' ) def test_invalid_field_type(self): class TestModelAdmin(ModelAdmin): filter_vertical = ('name',) 
self.assertIsInvalid( TestModelAdmin, ValidationTestModel, "The value of 'filter_vertical[0]' must be a many-to-many field.", 'admin.E020' ) def test_valid_case(self): class TestModelAdmin(ModelAdmin): filter_vertical = ('users',) self.assertIsValid(TestModelAdmin, ValidationTestModel) class FilterHorizontalCheckTests(CheckTestCase): def test_not_iterable(self): class TestModelAdmin(ModelAdmin): filter_horizontal = 10 self.assertIsInvalid( TestModelAdmin, ValidationTestModel, "The value of 'filter_horizontal' must be a list or tuple.", 'admin.E018' ) def test_missing_field(self): class TestModelAdmin(ModelAdmin): filter_horizontal = ('non_existent_field',) self.assertIsInvalid( TestModelAdmin, ValidationTestModel, "The value of 'filter_horizontal[0]' refers to 'non_existent_field', " "which is not an attribute of 'modeladmin.ValidationTestModel'.", 'admin.E019' ) def test_invalid_field_type(self): class TestModelAdmin(ModelAdmin): filter_horizontal = ('name',) self.assertIsInvalid( TestModelAdmin, ValidationTestModel, "The value of 'filter_horizontal[0]' must be a many-to-many field.", 'admin.E020' ) def test_valid_case(self): class TestModelAdmin(ModelAdmin): filter_horizontal = ('users',) self.assertIsValid(TestModelAdmin, ValidationTestModel) class RadioFieldsCheckTests(CheckTestCase): def test_not_dictionary(self): class TestModelAdmin(ModelAdmin): radio_fields = () self.assertIsInvalid( TestModelAdmin, ValidationTestModel, "The value of 'radio_fields' must be a dictionary.", 'admin.E021' ) def test_missing_field(self): class TestModelAdmin(ModelAdmin): radio_fields = {'non_existent_field': VERTICAL} self.assertIsInvalid( TestModelAdmin, ValidationTestModel, "The value of 'radio_fields' refers to 'non_existent_field', " "which is not an attribute of 'modeladmin.ValidationTestModel'.", 'admin.E022' ) def test_invalid_field_type(self): class TestModelAdmin(ModelAdmin): radio_fields = {'name': VERTICAL} self.assertIsInvalid( TestModelAdmin, ValidationTestModel, "The value of 'radio_fields' refers to 'name', which is not an instance " "of ForeignKey, and does not have a 'choices' definition.", 'admin.E023' ) def test_invalid_value(self): class TestModelAdmin(ModelAdmin): radio_fields = {'state': None} self.assertIsInvalid( TestModelAdmin, ValidationTestModel, "The value of 'radio_fields[\"state\"]' must be either admin.HORIZONTAL or admin.VERTICAL.", 'admin.E024' ) def test_valid_case(self): class TestModelAdmin(ModelAdmin): radio_fields = {'state': VERTICAL} self.assertIsValid(TestModelAdmin, ValidationTestModel) class PrepopulatedFieldsCheckTests(CheckTestCase): def test_not_list_or_tuple(self): class TestModelAdmin(ModelAdmin): prepopulated_fields = {'slug': 'test'} self.assertIsInvalid( TestModelAdmin, ValidationTestModel, 'The value of \'prepopulated_fields["slug"]\' must be a list ' 'or tuple.', 'admin.E029' ) def test_not_dictionary(self): class TestModelAdmin(ModelAdmin): prepopulated_fields = () self.assertIsInvalid( TestModelAdmin, ValidationTestModel, "The value of 'prepopulated_fields' must be a dictionary.", 'admin.E026' ) def test_missing_field(self): class TestModelAdmin(ModelAdmin): prepopulated_fields = {'non_existent_field': ('slug',)} self.assertIsInvalid( TestModelAdmin, ValidationTestModel, "The value of 'prepopulated_fields' refers to 'non_existent_field', " "which is not an attribute of 'modeladmin.ValidationTestModel'.", 'admin.E027' ) def test_missing_field_again(self): class TestModelAdmin(ModelAdmin): prepopulated_fields = {'slug': ('non_existent_field',)} 
self.assertIsInvalid( TestModelAdmin, ValidationTestModel, "The value of 'prepopulated_fields[\"slug\"][0]' refers to 'non_existent_field', " "which is not an attribute of 'modeladmin.ValidationTestModel'.", 'admin.E030' ) def test_invalid_field_type(self): class TestModelAdmin(ModelAdmin): prepopulated_fields = {'users': ('name',)} self.assertIsInvalid( TestModelAdmin, ValidationTestModel, "The value of 'prepopulated_fields' refers to 'users', which must not be " "a DateTimeField, a ForeignKey, a OneToOneField, or a ManyToManyField.", 'admin.E028' ) def test_valid_case(self): class TestModelAdmin(ModelAdmin): prepopulated_fields = {'slug': ('name',)} self.assertIsValid(TestModelAdmin, ValidationTestModel) def test_one_to_one_field(self): class TestModelAdmin(ModelAdmin): prepopulated_fields = {'best_friend': ('name',)} self.assertIsInvalid( TestModelAdmin, ValidationTestModel, "The value of 'prepopulated_fields' refers to 'best_friend', which must not be " "a DateTimeField, a ForeignKey, a OneToOneField, or a ManyToManyField.", 'admin.E028' ) class ListDisplayTests(CheckTestCase): def test_not_iterable(self): class TestModelAdmin(ModelAdmin): list_display = 10 self.assertIsInvalid( TestModelAdmin, ValidationTestModel, "The value of 'list_display' must be a list or tuple.", 'admin.E107' ) def test_missing_field(self): class TestModelAdmin(ModelAdmin): list_display = ('non_existent_field',) self.assertIsInvalid( TestModelAdmin, ValidationTestModel, "The value of 'list_display[0]' refers to 'non_existent_field', " "which is not a callable, an attribute of 'TestModelAdmin', " "or an attribute or method on 'modeladmin.ValidationTestModel'.", 'admin.E108' ) def test_invalid_field_type(self): class TestModelAdmin(ModelAdmin): list_display = ('users',) self.assertIsInvalid( TestModelAdmin, ValidationTestModel, "The value of 'list_display[0]' must not be a ManyToManyField.", 'admin.E109' ) def test_valid_case(self): def a_callable(obj): pass class TestModelAdmin(ModelAdmin): def a_method(self, obj): pass list_display = ('name', 'decade_published_in', 'a_method', a_callable) self.assertIsValid(TestModelAdmin, ValidationTestModel) def test_valid_field_accessible_via_instance(self): class PositionField(Field): """Custom field accessible only via instance.""" def contribute_to_class(self, cls, name): super().contribute_to_class(cls, name) setattr(cls, self.name, self) def __get__(self, instance, owner): if instance is None: raise AttributeError() class TestModel(Model): field = PositionField() class TestModelAdmin(ModelAdmin): list_display = ('field',) self.assertIsValid(TestModelAdmin, TestModel) class ListDisplayLinksCheckTests(CheckTestCase): def test_not_iterable(self): class TestModelAdmin(ModelAdmin): list_display_links = 10 self.assertIsInvalid( TestModelAdmin, ValidationTestModel, "The value of 'list_display_links' must be a list, a tuple, or None.", 'admin.E110' ) def test_missing_field(self): class TestModelAdmin(ModelAdmin): list_display_links = ('non_existent_field',) self.assertIsInvalid( TestModelAdmin, ValidationTestModel, ( "The value of 'list_display_links[0]' refers to " "'non_existent_field', which is not defined in 'list_display'." 
), 'admin.E111' ) def test_missing_in_list_display(self): class TestModelAdmin(ModelAdmin): list_display_links = ('name',) self.assertIsInvalid( TestModelAdmin, ValidationTestModel, "The value of 'list_display_links[0]' refers to 'name', which is not defined in 'list_display'.", 'admin.E111' ) def test_valid_case(self): def a_callable(obj): pass class TestModelAdmin(ModelAdmin): def a_method(self, obj): pass list_display = ('name', 'decade_published_in', 'a_method', a_callable) list_display_links = ('name', 'decade_published_in', 'a_method', a_callable) self.assertIsValid(TestModelAdmin, ValidationTestModel) def test_None_is_valid_case(self): class TestModelAdmin(ModelAdmin): list_display_links = None self.assertIsValid(TestModelAdmin, ValidationTestModel) def test_list_display_links_check_skipped_if_get_list_display_overridden(self): """ list_display_links check is skipped if get_list_display() is overridden. """ class TestModelAdmin(ModelAdmin): list_display_links = ['name', 'subtitle'] def get_list_display(self, request): pass self.assertIsValid(TestModelAdmin, ValidationTestModel) def test_list_display_link_checked_for_list_tuple_if_get_list_display_overridden(self): """ list_display_links is checked for list/tuple/None even if get_list_display() is overridden. """ class TestModelAdmin(ModelAdmin): list_display_links = 'non-list/tuple' def get_list_display(self, request): pass self.assertIsInvalid( TestModelAdmin, ValidationTestModel, "The value of 'list_display_links' must be a list, a tuple, or None.", 'admin.E110' ) class ListFilterTests(CheckTestCase): def test_list_filter_validation(self): class TestModelAdmin(ModelAdmin): list_filter = 10 self.assertIsInvalid( TestModelAdmin, ValidationTestModel, "The value of 'list_filter' must be a list or tuple.", 'admin.E112' ) def test_not_list_filter_class(self): class TestModelAdmin(ModelAdmin): list_filter = ['RandomClass'] self.assertIsInvalid( TestModelAdmin, ValidationTestModel, "The value of 'list_filter[0]' refers to 'RandomClass', which " "does not refer to a Field.", 'admin.E116' ) def test_callable(self): def random_callable(): pass class TestModelAdmin(ModelAdmin): list_filter = [random_callable] self.assertIsInvalid( TestModelAdmin, ValidationTestModel, "The value of 'list_filter[0]' must inherit from 'ListFilter'.", 'admin.E113' ) def test_not_callable(self): class TestModelAdmin(ModelAdmin): list_filter = [[42, 42]] self.assertIsInvalid( TestModelAdmin, ValidationTestModel, "The value of 'list_filter[0][1]' must inherit from 'FieldListFilter'.", 'admin.E115' ) def test_missing_field(self): class TestModelAdmin(ModelAdmin): list_filter = ('non_existent_field',) self.assertIsInvalid( TestModelAdmin, ValidationTestModel, "The value of 'list_filter[0]' refers to 'non_existent_field', " "which does not refer to a Field.", 'admin.E116' ) def test_not_filter(self): class RandomClass: pass class TestModelAdmin(ModelAdmin): list_filter = (RandomClass,) self.assertIsInvalid( TestModelAdmin, ValidationTestModel, "The value of 'list_filter[0]' must inherit from 'ListFilter'.", 'admin.E113' ) def test_not_filter_again(self): class RandomClass: pass class TestModelAdmin(ModelAdmin): list_filter = (('is_active', RandomClass),) self.assertIsInvalid( TestModelAdmin, ValidationTestModel, "The value of 'list_filter[0][1]' must inherit from 'FieldListFilter'.", 'admin.E115' ) def test_not_filter_again_again(self): class AwesomeFilter(SimpleListFilter): def get_title(self): return 'awesomeness' def get_choices(self, request): return (('bit', 'A 
bit awesome'), ('very', 'Very awesome')) def get_queryset(self, cl, qs): return qs class TestModelAdmin(ModelAdmin): list_filter = (('is_active', AwesomeFilter),) self.assertIsInvalid( TestModelAdmin, ValidationTestModel, "The value of 'list_filter[0][1]' must inherit from 'FieldListFilter'.", 'admin.E115' ) def test_list_filter_is_func(self): def get_filter(): pass class TestModelAdmin(ModelAdmin): list_filter = [get_filter] self.assertIsInvalid( TestModelAdmin, ValidationTestModel, "The value of 'list_filter[0]' must inherit from 'ListFilter'.", 'admin.E113' ) def test_not_associated_with_field_name(self): class TestModelAdmin(ModelAdmin): list_filter = (BooleanFieldListFilter,) self.assertIsInvalid( TestModelAdmin, ValidationTestModel, "The value of 'list_filter[0]' must not inherit from 'FieldListFilter'.", 'admin.E114' ) def test_valid_case(self): class AwesomeFilter(SimpleListFilter): def get_title(self): return 'awesomeness' def get_choices(self, request): return (('bit', 'A bit awesome'), ('very', 'Very awesome')) def get_queryset(self, cl, qs): return qs class TestModelAdmin(ModelAdmin): list_filter = ('is_active', AwesomeFilter, ('is_active', BooleanFieldListFilter), 'no') self.assertIsValid(TestModelAdmin, ValidationTestModel) class ListPerPageCheckTests(CheckTestCase): def test_not_integer(self): class TestModelAdmin(ModelAdmin): list_per_page = 'hello' self.assertIsInvalid( TestModelAdmin, ValidationTestModel, "The value of 'list_per_page' must be an integer.", 'admin.E118' ) def test_valid_case(self): class TestModelAdmin(ModelAdmin): list_per_page = 100 self.assertIsValid(TestModelAdmin, ValidationTestModel) class ListMaxShowAllCheckTests(CheckTestCase): def test_not_integer(self): class TestModelAdmin(ModelAdmin): list_max_show_all = 'hello' self.assertIsInvalid( TestModelAdmin, ValidationTestModel, "The value of 'list_max_show_all' must be an integer.", 'admin.E119' ) def test_valid_case(self): class TestModelAdmin(ModelAdmin): list_max_show_all = 200 self.assertIsValid(TestModelAdmin, ValidationTestModel) class SearchFieldsCheckTests(CheckTestCase): def test_not_iterable(self): class TestModelAdmin(ModelAdmin): search_fields = 10 self.assertIsInvalid( TestModelAdmin, ValidationTestModel, "The value of 'search_fields' must be a list or tuple.", 'admin.E126' ) class DateHierarchyCheckTests(CheckTestCase): def test_missing_field(self): class TestModelAdmin(ModelAdmin): date_hierarchy = 'non_existent_field' self.assertIsInvalid( TestModelAdmin, ValidationTestModel, "The value of 'date_hierarchy' refers to 'non_existent_field', " "which does not refer to a Field.", 'admin.E127' ) def test_invalid_field_type(self): class TestModelAdmin(ModelAdmin): date_hierarchy = 'name' self.assertIsInvalid( TestModelAdmin, ValidationTestModel, "The value of 'date_hierarchy' must be a DateField or DateTimeField.", 'admin.E128' ) def test_valid_case(self): class TestModelAdmin(ModelAdmin): date_hierarchy = 'pub_date' self.assertIsValid(TestModelAdmin, ValidationTestModel) def test_related_valid_case(self): class TestModelAdmin(ModelAdmin): date_hierarchy = 'band__sign_date' self.assertIsValid(TestModelAdmin, ValidationTestModel) def test_related_invalid_field_type(self): class TestModelAdmin(ModelAdmin): date_hierarchy = 'band__name' self.assertIsInvalid( TestModelAdmin, ValidationTestModel, "The value of 'date_hierarchy' must be a DateField or DateTimeField.", 'admin.E128' ) class OrderingCheckTests(CheckTestCase): def test_not_iterable(self): class TestModelAdmin(ModelAdmin): ordering = 10 
self.assertIsInvalid( TestModelAdmin, ValidationTestModel, "The value of 'ordering' must be a list or tuple.", 'admin.E031' ) class TestModelAdmin(ModelAdmin): ordering = ('non_existent_field',) self.assertIsInvalid( TestModelAdmin, ValidationTestModel, "The value of 'ordering[0]' refers to 'non_existent_field', " "which is not an attribute of 'modeladmin.ValidationTestModel'.", 'admin.E033' ) def test_random_marker_not_alone(self): class TestModelAdmin(ModelAdmin): ordering = ('?', 'name') self.assertIsInvalid( TestModelAdmin, ValidationTestModel, "The value of 'ordering' has the random ordering marker '?', but contains " "other fields as well.", 'admin.E032', hint='Either remove the "?", or remove the other fields.' ) def test_valid_random_marker_case(self): class TestModelAdmin(ModelAdmin): ordering = ('?',) self.assertIsValid(TestModelAdmin, ValidationTestModel) def test_valid_complex_case(self): class TestModelAdmin(ModelAdmin): ordering = ('band__name',) self.assertIsValid(TestModelAdmin, ValidationTestModel) def test_valid_case(self): class TestModelAdmin(ModelAdmin): ordering = ('name', 'pk') self.assertIsValid(TestModelAdmin, ValidationTestModel) def test_invalid_expression(self): class TestModelAdmin(ModelAdmin): ordering = (F('nonexistent'), ) self.assertIsInvalid( TestModelAdmin, ValidationTestModel, "The value of 'ordering[0]' refers to 'nonexistent', which is not " "an attribute of 'modeladmin.ValidationTestModel'.", 'admin.E033' ) def test_valid_expression(self): class TestModelAdmin(ModelAdmin): ordering = (Upper('name'), Upper('band__name').desc()) self.assertIsValid(TestModelAdmin, ValidationTestModel) class ListSelectRelatedCheckTests(CheckTestCase): def test_invalid_type(self): class TestModelAdmin(ModelAdmin): list_select_related = 1 self.assertIsInvalid( TestModelAdmin, ValidationTestModel, "The value of 'list_select_related' must be a boolean, tuple or list.", 'admin.E117' ) def test_valid_case(self): class TestModelAdmin(ModelAdmin): list_select_related = False self.assertIsValid(TestModelAdmin, ValidationTestModel) class SaveAsCheckTests(CheckTestCase): def test_not_boolean(self): class TestModelAdmin(ModelAdmin): save_as = 1 self.assertIsInvalid( TestModelAdmin, ValidationTestModel, "The value of 'save_as' must be a boolean.", 'admin.E101' ) def test_valid_case(self): class TestModelAdmin(ModelAdmin): save_as = True self.assertIsValid(TestModelAdmin, ValidationTestModel) class SaveOnTopCheckTests(CheckTestCase): def test_not_boolean(self): class TestModelAdmin(ModelAdmin): save_on_top = 1 self.assertIsInvalid( TestModelAdmin, ValidationTestModel, "The value of 'save_on_top' must be a boolean.", 'admin.E102' ) def test_valid_case(self): class TestModelAdmin(ModelAdmin): save_on_top = True self.assertIsValid(TestModelAdmin, ValidationTestModel) class InlinesCheckTests(CheckTestCase): def test_not_iterable(self): class TestModelAdmin(ModelAdmin): inlines = 10 self.assertIsInvalid( TestModelAdmin, ValidationTestModel, "The value of 'inlines' must be a list or tuple.", 'admin.E103' ) def test_not_correct_inline_field(self): class TestModelAdmin(ModelAdmin): inlines = [42] self.assertIsInvalidRegexp( TestModelAdmin, ValidationTestModel, r"'.*\.TestModelAdmin' must inherit from 'InlineModelAdmin'\.", 'admin.E104' ) def test_not_model_admin(self): class ValidationTestInline: pass class TestModelAdmin(ModelAdmin): inlines = [ValidationTestInline] self.assertIsInvalidRegexp( TestModelAdmin, ValidationTestModel, r"'.*\.ValidationTestInline' must inherit from 
'InlineModelAdmin'\.", 'admin.E104' ) def test_missing_model_field(self): class ValidationTestInline(TabularInline): pass class TestModelAdmin(ModelAdmin): inlines = [ValidationTestInline] self.assertIsInvalidRegexp( TestModelAdmin, ValidationTestModel, r"'.*\.ValidationTestInline' must have a 'model' attribute\.", 'admin.E105' ) def test_invalid_model_type(self): class SomethingBad: pass class ValidationTestInline(TabularInline): model = SomethingBad class TestModelAdmin(ModelAdmin): inlines = [ValidationTestInline] self.assertIsInvalidRegexp( TestModelAdmin, ValidationTestModel, r"The value of '.*\.ValidationTestInline.model' must be a Model\.", 'admin.E106' ) def test_invalid_model(self): class ValidationTestInline(TabularInline): model = 'Not a class' class TestModelAdmin(ModelAdmin): inlines = [ValidationTestInline] self.assertIsInvalidRegexp( TestModelAdmin, ValidationTestModel, r"The value of '.*\.ValidationTestInline.model' must be a Model\.", 'admin.E106' ) def test_invalid_callable(self): def random_obj(): pass class TestModelAdmin(ModelAdmin): inlines = [random_obj] self.assertIsInvalidRegexp( TestModelAdmin, ValidationTestModel, r"'.*\.random_obj' must inherit from 'InlineModelAdmin'\.", 'admin.E104' ) def test_valid_case(self): class ValidationTestInline(TabularInline): model = ValidationTestInlineModel class TestModelAdmin(ModelAdmin): inlines = [ValidationTestInline] self.assertIsValid(TestModelAdmin, ValidationTestModel) class FkNameCheckTests(CheckTestCase): def test_missing_field(self): class ValidationTestInline(TabularInline): model = ValidationTestInlineModel fk_name = 'non_existent_field' class TestModelAdmin(ModelAdmin): inlines = [ValidationTestInline] self.assertIsInvalid( TestModelAdmin, ValidationTestModel, "'modeladmin.ValidationTestInlineModel' has no field named 'non_existent_field'.", 'admin.E202', invalid_obj=ValidationTestInline ) def test_valid_case(self): class ValidationTestInline(TabularInline): model = ValidationTestInlineModel fk_name = 'parent' class TestModelAdmin(ModelAdmin): inlines = [ValidationTestInline] self.assertIsValid(TestModelAdmin, ValidationTestModel) class ExtraCheckTests(CheckTestCase): def test_not_integer(self): class ValidationTestInline(TabularInline): model = ValidationTestInlineModel extra = 'hello' class TestModelAdmin(ModelAdmin): inlines = [ValidationTestInline] self.assertIsInvalid( TestModelAdmin, ValidationTestModel, "The value of 'extra' must be an integer.", 'admin.E203', invalid_obj=ValidationTestInline ) def test_valid_case(self): class ValidationTestInline(TabularInline): model = ValidationTestInlineModel extra = 2 class TestModelAdmin(ModelAdmin): inlines = [ValidationTestInline] self.assertIsValid(TestModelAdmin, ValidationTestModel) class MaxNumCheckTests(CheckTestCase): def test_not_integer(self): class ValidationTestInline(TabularInline): model = ValidationTestInlineModel max_num = 'hello' class TestModelAdmin(ModelAdmin): inlines = [ValidationTestInline] self.assertIsInvalid( TestModelAdmin, ValidationTestModel, "The value of 'max_num' must be an integer.", 'admin.E204', invalid_obj=ValidationTestInline ) def test_valid_case(self): class ValidationTestInline(TabularInline): model = ValidationTestInlineModel max_num = 2 class TestModelAdmin(ModelAdmin): inlines = [ValidationTestInline] self.assertIsValid(TestModelAdmin, ValidationTestModel) class MinNumCheckTests(CheckTestCase): def test_not_integer(self): class ValidationTestInline(TabularInline): model = ValidationTestInlineModel min_num = 'hello' class 
TestModelAdmin(ModelAdmin): inlines = [ValidationTestInline] self.assertIsInvalid( TestModelAdmin, ValidationTestModel, "The value of 'min_num' must be an integer.", 'admin.E205', invalid_obj=ValidationTestInline ) def test_valid_case(self): class ValidationTestInline(TabularInline): model = ValidationTestInlineModel min_num = 2 class TestModelAdmin(ModelAdmin): inlines = [ValidationTestInline] self.assertIsValid(TestModelAdmin, ValidationTestModel) class FormsetCheckTests(CheckTestCase): def test_invalid_type(self): class FakeFormSet: pass class ValidationTestInline(TabularInline): model = ValidationTestInlineModel formset = FakeFormSet class TestModelAdmin(ModelAdmin): inlines = [ValidationTestInline] self.assertIsInvalid( TestModelAdmin, ValidationTestModel, "The value of 'formset' must inherit from 'BaseModelFormSet'.", 'admin.E206', invalid_obj=ValidationTestInline ) def test_inline_without_formset_class(self): class ValidationTestInlineWithoutFormsetClass(TabularInline): model = ValidationTestInlineModel formset = 'Not a FormSet Class' class TestModelAdminWithoutFormsetClass(ModelAdmin): inlines = [ValidationTestInlineWithoutFormsetClass] self.assertIsInvalid( TestModelAdminWithoutFormsetClass, ValidationTestModel, "The value of 'formset' must inherit from 'BaseModelFormSet'.", 'admin.E206', invalid_obj=ValidationTestInlineWithoutFormsetClass ) def test_valid_case(self): class RealModelFormSet(BaseModelFormSet): pass class ValidationTestInline(TabularInline): model = ValidationTestInlineModel formset = RealModelFormSet class TestModelAdmin(ModelAdmin): inlines = [ValidationTestInline] self.assertIsValid(TestModelAdmin, ValidationTestModel) class ListDisplayEditableTests(CheckTestCase): def test_list_display_links_is_none(self): """ list_display and list_editable can contain the same values when list_display_links is None """ class ProductAdmin(ModelAdmin): list_display = ['name', 'slug', 'pub_date'] list_editable = list_display list_display_links = None self.assertIsValid(ProductAdmin, ValidationTestModel) def test_list_display_first_item_same_as_list_editable_first_item(self): """ The first item in list_display can be the same as the first in list_editable. """ class ProductAdmin(ModelAdmin): list_display = ['name', 'slug', 'pub_date'] list_editable = ['name', 'slug'] list_display_links = ['pub_date'] self.assertIsValid(ProductAdmin, ValidationTestModel) def test_list_display_first_item_in_list_editable(self): """ The first item in list_display can be in list_editable as long as list_display_links is defined. """ class ProductAdmin(ModelAdmin): list_display = ['name', 'slug', 'pub_date'] list_editable = ['slug', 'name'] list_display_links = ['pub_date'] self.assertIsValid(ProductAdmin, ValidationTestModel) def test_list_display_first_item_same_as_list_editable_no_list_display_links(self): """ The first item in list_display cannot be the same as the first item in list_editable if list_display_links is not defined. """ class ProductAdmin(ModelAdmin): list_display = ['name'] list_editable = ['name'] self.assertIsInvalid( ProductAdmin, ValidationTestModel, "The value of 'list_editable[0]' refers to the first field " "in 'list_display' ('name'), which cannot be used unless " "'list_display_links' is set.", id='admin.E124', ) def test_list_display_first_item_in_list_editable_no_list_display_links(self): """ The first item in list_display cannot be in list_editable if list_display_links isn't defined. 
""" class ProductAdmin(ModelAdmin): list_display = ['name', 'slug', 'pub_date'] list_editable = ['slug', 'name'] self.assertIsInvalid( ProductAdmin, ValidationTestModel, "The value of 'list_editable[1]' refers to the first field " "in 'list_display' ('name'), which cannot be used unless " "'list_display_links' is set.", id='admin.E124', ) def test_both_list_editable_and_list_display_links(self): class ProductAdmin(ModelAdmin): list_editable = ('name',) list_display = ('name',) list_display_links = ('name',) self.assertIsInvalid( ProductAdmin, ValidationTestModel, "The value of 'name' cannot be in both 'list_editable' and " "'list_display_links'.", id='admin.E123', ) class AutocompleteFieldsTests(CheckTestCase): def test_autocomplete_e036(self): class Admin(ModelAdmin): autocomplete_fields = 'name' self.assertIsInvalid( Admin, Band, msg="The value of 'autocomplete_fields' must be a list or tuple.", id='admin.E036', invalid_obj=Admin, ) def test_autocomplete_e037(self): class Admin(ModelAdmin): autocomplete_fields = ('nonexistent',) self.assertIsInvalid( Admin, ValidationTestModel, msg=( "The value of 'autocomplete_fields[0]' refers to 'nonexistent', " "which is not an attribute of 'modeladmin.ValidationTestModel'." ), id='admin.E037', invalid_obj=Admin, ) def test_autocomplete_e38(self): class Admin(ModelAdmin): autocomplete_fields = ('name',) self.assertIsInvalid( Admin, ValidationTestModel, msg=( "The value of 'autocomplete_fields[0]' must be a foreign " "key or a many-to-many field." ), id='admin.E038', invalid_obj=Admin, ) def test_autocomplete_e039(self): class Admin(ModelAdmin): autocomplete_fields = ('band',) self.assertIsInvalid( Admin, Song, msg=( 'An admin for model "Band" has to be registered ' 'to be referenced by Admin.autocomplete_fields.' ), id='admin.E039', invalid_obj=Admin, ) def test_autocomplete_e040(self): class NoSearchFieldsAdmin(ModelAdmin): pass class AutocompleteAdmin(ModelAdmin): autocomplete_fields = ('featuring',) site = AdminSite() site.register(Band, NoSearchFieldsAdmin) self.assertIsInvalid( AutocompleteAdmin, Song, msg=( 'NoSearchFieldsAdmin must define "search_fields", because ' 'it\'s referenced by AutocompleteAdmin.autocomplete_fields.' ), id='admin.E040', invalid_obj=AutocompleteAdmin, admin_site=site, ) def test_autocomplete_is_valid(self): class SearchFieldsAdmin(ModelAdmin): search_fields = 'name' class AutocompleteAdmin(ModelAdmin): autocomplete_fields = ('featuring',) site = AdminSite() site.register(Band, SearchFieldsAdmin) self.assertIsValid(AutocompleteAdmin, Song, admin_site=site) def test_autocomplete_is_onetoone(self): class UserAdmin(ModelAdmin): search_fields = ('name',) class Admin(ModelAdmin): autocomplete_fields = ('best_friend',) site = AdminSite() site.register(User, UserAdmin) self.assertIsValid(Admin, ValidationTestModel, admin_site=site) class ActionsCheckTests(CheckTestCase): def test_custom_permissions_require_matching_has_method(self): def custom_permission_action(modeladmin, request, queryset): pass custom_permission_action.allowed_permissions = ('custom',) class BandAdmin(ModelAdmin): actions = (custom_permission_action,) self.assertIsInvalid( BandAdmin, Band, 'BandAdmin must define a has_custom_permission() method for the ' 'custom_permission_action action.', id='admin.E129', ) def test_actions_not_unique(self): def action(modeladmin, request, queryset): pass class BandAdmin(ModelAdmin): actions = (action, action) self.assertIsInvalid( BandAdmin, Band, "__name__ attributes of actions defined in BandAdmin must be " "unique. 
Name 'action' is not unique.", id='admin.E130', ) def test_actions_unique(self): def action1(modeladmin, request, queryset): pass def action2(modeladmin, request, queryset): pass class BandAdmin(ModelAdmin): actions = (action1, action2) self.assertIsValid(BandAdmin, Band)
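
# Illustrative sketch (not part of the original test module; assumes a
# configured Django settings module so the models above are usable): every
# assertion in this file ultimately goes through ModelAdmin.check(), which
# returns a list of django.core.checks.Error instances. An empty list means
# the admin class passed all of the admin checks.
def _modeladmin_check_sketch():
    class SketchBandAdmin(ModelAdmin):
        list_display = ('name',)

    errors = SketchBandAdmin(Band, AdminSite()).check()
    assert errors == []  # a misconfigured admin would return Error objects here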
from django.db import connection from django.db.models import Exists, F, IntegerField, OuterRef, Value from django.db.utils import DatabaseError, NotSupportedError from django.test import TestCase, skipIfDBFeature, skipUnlessDBFeature from .models import Number, ReservedName @skipUnlessDBFeature('supports_select_union') class QuerySetSetOperationTests(TestCase): @classmethod def setUpTestData(cls): Number.objects.bulk_create(Number(num=i, other_num=10 - i) for i in range(10)) def number_transform(self, value): return value.num def assertNumbersEqual(self, queryset, expected_numbers, ordered=True): self.assertQuerysetEqual(queryset, expected_numbers, self.number_transform, ordered) def test_simple_union(self): qs1 = Number.objects.filter(num__lte=1) qs2 = Number.objects.filter(num__gte=8) qs3 = Number.objects.filter(num=5) self.assertNumbersEqual(qs1.union(qs2, qs3), [0, 1, 5, 8, 9], ordered=False) @skipUnlessDBFeature('supports_select_intersection') def test_simple_intersection(self): qs1 = Number.objects.filter(num__lte=5) qs2 = Number.objects.filter(num__gte=5) qs3 = Number.objects.filter(num__gte=4, num__lte=6) self.assertNumbersEqual(qs1.intersection(qs2, qs3), [5], ordered=False) @skipUnlessDBFeature('supports_select_intersection') def test_intersection_with_values(self): ReservedName.objects.create(name='a', order=2) qs1 = ReservedName.objects.all() reserved_name = qs1.intersection(qs1).values('name', 'order', 'id').get() self.assertEqual(reserved_name['name'], 'a') self.assertEqual(reserved_name['order'], 2) reserved_name = qs1.intersection(qs1).values_list('name', 'order', 'id').get() self.assertEqual(reserved_name[:2], ('a', 2)) @skipUnlessDBFeature('supports_select_difference') def test_simple_difference(self): qs1 = Number.objects.filter(num__lte=5) qs2 = Number.objects.filter(num__lte=4) self.assertNumbersEqual(qs1.difference(qs2), [5], ordered=False) def test_union_distinct(self): qs1 = Number.objects.all() qs2 = Number.objects.all() self.assertEqual(len(list(qs1.union(qs2, all=True))), 20) self.assertEqual(len(list(qs1.union(qs2))), 10) @skipUnlessDBFeature('supports_select_intersection') def test_intersection_with_empty_qs(self): qs1 = Number.objects.all() qs2 = Number.objects.none() qs3 = Number.objects.filter(pk__in=[]) self.assertEqual(len(qs1.intersection(qs2)), 0) self.assertEqual(len(qs1.intersection(qs3)), 0) self.assertEqual(len(qs2.intersection(qs1)), 0) self.assertEqual(len(qs3.intersection(qs1)), 0) self.assertEqual(len(qs2.intersection(qs2)), 0) self.assertEqual(len(qs3.intersection(qs3)), 0) @skipUnlessDBFeature('supports_select_difference') def test_difference_with_empty_qs(self): qs1 = Number.objects.all() qs2 = Number.objects.none() qs3 = Number.objects.filter(pk__in=[]) self.assertEqual(len(qs1.difference(qs2)), 10) self.assertEqual(len(qs1.difference(qs3)), 10) self.assertEqual(len(qs2.difference(qs1)), 0) self.assertEqual(len(qs3.difference(qs1)), 0) self.assertEqual(len(qs2.difference(qs2)), 0) self.assertEqual(len(qs3.difference(qs3)), 0) @skipUnlessDBFeature('supports_select_difference') def test_difference_with_values(self): ReservedName.objects.create(name='a', order=2) qs1 = ReservedName.objects.all() qs2 = ReservedName.objects.none() reserved_name = qs1.difference(qs2).values('name', 'order', 'id').get() self.assertEqual(reserved_name['name'], 'a') self.assertEqual(reserved_name['order'], 2) reserved_name = qs1.difference(qs2).values_list('name', 'order', 'id').get() self.assertEqual(reserved_name[:2], ('a', 2)) def test_union_with_empty_qs(self): 
qs1 = Number.objects.all() qs2 = Number.objects.none() qs3 = Number.objects.filter(pk__in=[]) self.assertEqual(len(qs1.union(qs2)), 10) self.assertEqual(len(qs2.union(qs1)), 10) self.assertEqual(len(qs1.union(qs3)), 10) self.assertEqual(len(qs3.union(qs1)), 10) self.assertEqual(len(qs2.union(qs1, qs1, qs1)), 10) self.assertEqual(len(qs2.union(qs1, qs1, all=True)), 20) self.assertEqual(len(qs2.union(qs2)), 0) self.assertEqual(len(qs3.union(qs3)), 0) def test_limits(self): qs1 = Number.objects.all() qs2 = Number.objects.all() self.assertEqual(len(list(qs1.union(qs2)[:2])), 2) def test_ordering(self): qs1 = Number.objects.filter(num__lte=1) qs2 = Number.objects.filter(num__gte=2, num__lte=3) self.assertNumbersEqual(qs1.union(qs2).order_by('-num'), [3, 2, 1, 0]) def test_ordering_by_f_expression(self): qs1 = Number.objects.filter(num__lte=1) qs2 = Number.objects.filter(num__gte=2, num__lte=3) self.assertNumbersEqual(qs1.union(qs2).order_by(F('num').desc()), [3, 2, 1, 0]) def test_union_with_values(self): ReservedName.objects.create(name='a', order=2) qs1 = ReservedName.objects.all() reserved_name = qs1.union(qs1).values('name', 'order', 'id').get() self.assertEqual(reserved_name['name'], 'a') self.assertEqual(reserved_name['order'], 2) reserved_name = qs1.union(qs1).values_list('name', 'order', 'id').get() self.assertEqual(reserved_name[:2], ('a', 2)) # List of columns can be changed. reserved_name = qs1.union(qs1).values_list('order').get() self.assertEqual(reserved_name, (2,)) def test_union_with_two_annotated_values_list(self): qs1 = Number.objects.filter(num=1).annotate( count=Value(0, IntegerField()), ).values_list('num', 'count') qs2 = Number.objects.filter(num=2).values('pk').annotate( count=F('num'), ).annotate( num=Value(1, IntegerField()), ).values_list('num', 'count') self.assertCountEqual(qs1.union(qs2), [(1, 0), (2, 1)]) def test_union_with_extra_and_values_list(self): qs1 = Number.objects.filter(num=1).extra( select={'count': 0}, ).values_list('num', 'count') qs2 = Number.objects.filter(num=2).extra(select={'count': 1}) self.assertCountEqual(qs1.union(qs2), [(1, 0), (2, 1)]) def test_union_with_values_list_on_annotated_and_unannotated(self): ReservedName.objects.create(name='rn1', order=1) qs1 = Number.objects.annotate( has_reserved_name=Exists(ReservedName.objects.filter(order=OuterRef('num'))) ).filter(has_reserved_name=True) qs2 = Number.objects.filter(num=9) self.assertCountEqual(qs1.union(qs2).values_list('num', flat=True), [1, 9]) def test_union_with_values_list_and_order(self): ReservedName.objects.bulk_create([ ReservedName(name='rn1', order=7), ReservedName(name='rn2', order=5), ReservedName(name='rn0', order=6), ReservedName(name='rn9', order=-1), ]) qs1 = ReservedName.objects.filter(order__gte=6) qs2 = ReservedName.objects.filter(order__lte=5) union_qs = qs1.union(qs2) for qs, expected_result in ( # Order by a single column. (union_qs.order_by('-pk').values_list('order', flat=True), [-1, 6, 5, 7]), (union_qs.order_by('pk').values_list('order', flat=True), [7, 5, 6, -1]), (union_qs.values_list('order', flat=True).order_by('-pk'), [-1, 6, 5, 7]), (union_qs.values_list('order', flat=True).order_by('pk'), [7, 5, 6, -1]), # Order by multiple columns. 
            (union_qs.order_by('-name', 'pk').values_list('order', flat=True), [-1, 5, 7, 6]),
            (union_qs.values_list('order', flat=True).order_by('-name', 'pk'), [-1, 5, 7, 6]),
        ):
            with self.subTest(qs=qs):
                self.assertEqual(list(qs), expected_result)

    def test_count_union(self):
        qs1 = Number.objects.filter(num__lte=1).values('num')
        qs2 = Number.objects.filter(num__gte=2, num__lte=3).values('num')
        self.assertEqual(qs1.union(qs2).count(), 4)

    def test_count_union_empty_result(self):
        qs = Number.objects.filter(pk__in=[])
        self.assertEqual(qs.union(qs).count(), 0)

    @skipUnlessDBFeature('supports_select_difference')
    def test_count_difference(self):
        qs1 = Number.objects.filter(num__lt=10)
        qs2 = Number.objects.filter(num__lt=9)
        self.assertEqual(qs1.difference(qs2).count(), 1)

    @skipUnlessDBFeature('supports_select_intersection')
    def test_count_intersection(self):
        qs1 = Number.objects.filter(num__gte=5)
        qs2 = Number.objects.filter(num__lte=5)
        self.assertEqual(qs1.intersection(qs2).count(), 1)

    @skipUnlessDBFeature('supports_slicing_ordering_in_compound')
    def test_ordering_subqueries(self):
        qs1 = Number.objects.order_by('num')[:2]
        qs2 = Number.objects.order_by('-num')[:2]
        self.assertNumbersEqual(qs1.union(qs2).order_by('-num')[:4], [9, 8, 1, 0])

    @skipIfDBFeature('supports_slicing_ordering_in_compound')
    def test_unsupported_ordering_slicing_raises_db_error(self):
        qs1 = Number.objects.all()
        qs2 = Number.objects.all()
        msg = 'LIMIT/OFFSET not allowed in subqueries of compound statements'
        with self.assertRaisesMessage(DatabaseError, msg):
            list(qs1.union(qs2[:10]))
        msg = 'ORDER BY not allowed in subqueries of compound statements'
        with self.assertRaisesMessage(DatabaseError, msg):
            list(qs1.order_by('id').union(qs2))

    @skipIfDBFeature('supports_select_intersection')
    def test_unsupported_intersection_raises_db_error(self):
        qs1 = Number.objects.all()
        qs2 = Number.objects.all()
        msg = 'intersection is not supported on this database backend'
        with self.assertRaisesMessage(NotSupportedError, msg):
            list(qs1.intersection(qs2))

    def test_combining_multiple_models(self):
        ReservedName.objects.create(name='99 little bugs', order=99)
        qs1 = Number.objects.filter(num=1).values_list('num', flat=True)
        qs2 = ReservedName.objects.values_list('order')
        self.assertEqual(list(qs1.union(qs2).order_by('num')), [1, 99])

    def test_order_raises_on_non_selected_column(self):
        qs1 = Number.objects.filter().annotate(
            annotation=Value(1, IntegerField()),
        ).values('annotation', num2=F('num'))
        qs2 = Number.objects.filter().values('id', 'num')
        # Should not raise.
        list(qs1.union(qs2).order_by('annotation'))
        list(qs1.union(qs2).order_by('num2'))
        msg = 'ORDER BY term does not match any column in the result set'
        # 'id' is not part of the select list.
        with self.assertRaisesMessage(DatabaseError, msg):
            list(qs1.union(qs2).order_by('id'))
        # 'num' was re-aliased to 'num2'.
        with self.assertRaisesMessage(DatabaseError, msg):
            list(qs1.union(qs2).order_by('num'))
        # With the operands switched, 'num' is part of the select list again.
        list(qs2.union(qs1).order_by('num'))

    @skipUnlessDBFeature('supports_select_difference', 'supports_select_intersection')
    def test_qs_with_subcompound_qs(self):
        qs1 = Number.objects.all()
        qs2 = Number.objects.intersection(Number.objects.filter(num__gt=1))
        self.assertEqual(qs1.difference(qs2).count(), 2)

    def test_order_by_same_type(self):
        qs = Number.objects.all()
        union = qs.union(qs)
        numbers = list(range(10))
        self.assertNumbersEqual(union.order_by('num'), numbers)
        self.assertNumbersEqual(union.order_by('other_num'), reversed(numbers))

    def test_unsupported_operations_on_combined_qs(self):
        qs = Number.objects.all()
        msg = 'Calling QuerySet.%s() after %s() is not supported.'
        combinators = ['union']
        if connection.features.supports_select_difference:
            combinators.append('difference')
        if connection.features.supports_select_intersection:
            combinators.append('intersection')
        for combinator in combinators:
            for operation in (
                'annotate',
                'defer',
                'delete',
                'exclude',
                'extra',
                'filter',
                'only',
                'prefetch_related',
                'select_related',
                'update',
            ):
                with self.subTest(combinator=combinator, operation=operation):
                    with self.assertRaisesMessage(
                        NotSupportedError,
                        msg % (operation, combinator),
                    ):
                        getattr(getattr(qs, combinator)(qs), operation)()
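
# Illustrative sketch (not part of the original test module; assumes the
# Number rows created in setUpTestData and a backend where the
# supports_select_union/intersection/difference features are all available):
# union(), intersection(), and difference() combine querysets with SQL set
# operators and return a new queryset that is largely read-only, which is
# why the last test above expects NotSupportedError from filter() and co.
def _set_operations_sketch():
    lows = Number.objects.filter(num__lt=3).values_list('num', flat=True)
    evens = Number.objects.filter(num__in=[0, 2, 4]).values_list('num', flat=True)
    assert sorted(lows.union(evens)) == [0, 1, 2, 4]  # UNION is distinct by default
    assert sorted(lows.union(evens, all=True)) == [0, 0, 1, 2, 2, 4]  # UNION ALL
    assert sorted(lows.intersection(evens)) == [0, 2]  # INTERSECT
    assert sorted(lows.difference(evens)) == [1]  # EXCEPT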
import os from django.template import Context, Engine, TemplateSyntaxError from django.template.base import Node from django.template.library import InvalidTemplateLibrary from django.test import SimpleTestCase from django.test.utils import extend_sys_path from .templatetags import custom, inclusion from .utils import ROOT LIBRARIES = { 'custom': 'template_tests.templatetags.custom', 'inclusion': 'template_tests.templatetags.inclusion', } class CustomFilterTests(SimpleTestCase): def test_filter(self): engine = Engine(libraries=LIBRARIES) t = engine.from_string("{% load custom %}{{ string|trim:5 }}") self.assertEqual( t.render(Context({"string": "abcdefghijklmnopqrstuvwxyz"})), "abcde" ) def test_decorated_filter(self): engine = Engine(libraries=LIBRARIES) t = engine.from_string('{% load custom %}{{ name|make_data_div }}') self.assertEqual(t.render(Context({'name': 'foo'})), '<div data-name="foo"></div>') class TagTestCase(SimpleTestCase): @classmethod def setUpClass(cls): cls.engine = Engine(app_dirs=True, libraries=LIBRARIES) super().setUpClass() def verify_tag(self, tag, name): self.assertEqual(tag.__name__, name) self.assertEqual(tag.__doc__, 'Expected %s __doc__' % name) self.assertEqual(tag.__dict__['anything'], 'Expected %s __dict__' % name) class SimpleTagTests(TagTestCase): def test_simple_tags(self): c = Context({'value': 42}) templates = [ ('{% load custom %}{% no_params %}', 'no_params - Expected result'), ('{% load custom %}{% one_param 37 %}', 'one_param - Expected result: 37'), ('{% load custom %}{% explicit_no_context 37 %}', 'explicit_no_context - Expected result: 37'), ('{% load custom %}{% no_params_with_context %}', 'no_params_with_context - Expected result (context value: 42)'), ('{% load custom %}{% params_and_context 37 %}', 'params_and_context - Expected result (context value: 42): 37'), ('{% load custom %}{% simple_two_params 37 42 %}', 'simple_two_params - Expected result: 37, 42'), ('{% load custom %}{% simple_keyword_only_param kwarg=37 %}', 'simple_keyword_only_param - Expected result: 37'), ('{% load custom %}{% simple_keyword_only_default %}', 'simple_keyword_only_default - Expected result: 42'), ( '{% load custom %}{% simple_keyword_only_default kwarg=37 %}', 'simple_keyword_only_default - Expected result: 37', ), ('{% load custom %}{% simple_one_default 37 %}', 'simple_one_default - Expected result: 37, hi'), ('{% load custom %}{% simple_one_default 37 two="hello" %}', 'simple_one_default - Expected result: 37, hello'), ('{% load custom %}{% simple_one_default one=99 two="hello" %}', 'simple_one_default - Expected result: 99, hello'), ('{% load custom %}{% simple_one_default 37 42 %}', 'simple_one_default - Expected result: 37, 42'), ('{% load custom %}{% simple_unlimited_args 37 %}', 'simple_unlimited_args - Expected result: 37, hi'), ('{% load custom %}{% simple_unlimited_args 37 42 56 89 %}', 'simple_unlimited_args - Expected result: 37, 42, 56, 89'), ('{% load custom %}{% simple_only_unlimited_args %}', 'simple_only_unlimited_args - Expected result: '), ('{% load custom %}{% simple_only_unlimited_args 37 42 56 89 %}', 'simple_only_unlimited_args - Expected result: 37, 42, 56, 89'), ('{% load custom %}{% simple_unlimited_args_kwargs 37 40|add:2 56 eggs="scrambled" four=1|add:3 %}', 'simple_unlimited_args_kwargs - Expected result: 37, 42, 56 / eggs=scrambled, four=4'), ] for entry in templates: t = self.engine.from_string(entry[0]) self.assertEqual(t.render(c), entry[1]) for entry in templates: t = self.engine.from_string("%s as var %%}Result: {{ var }}" % 
entry[0][0:-2]) self.assertEqual(t.render(c), "Result: %s" % entry[1]) def test_simple_tag_errors(self): errors = [ ("'simple_one_default' received unexpected keyword argument 'three'", '{% load custom %}{% simple_one_default 99 two="hello" three="foo" %}'), ("'simple_two_params' received too many positional arguments", '{% load custom %}{% simple_two_params 37 42 56 %}'), ("'simple_one_default' received too many positional arguments", '{% load custom %}{% simple_one_default 37 42 56 %}'), ("'simple_keyword_only_param' did not receive value(s) for the argument(s): 'kwarg'", '{% load custom %}{% simple_keyword_only_param %}'), ( "'simple_keyword_only_param' received multiple values for " "keyword argument 'kwarg'", '{% load custom %}{% simple_keyword_only_param kwarg=42 ' 'kwarg=37 %}', ), ( "'simple_keyword_only_default' received multiple values for " "keyword argument 'kwarg'", '{% load custom %}{% simple_keyword_only_default kwarg=42 ' 'kwarg=37 %}', ), ("'simple_unlimited_args_kwargs' received some positional argument(s) after some keyword argument(s)", '{% load custom %}{% simple_unlimited_args_kwargs 37 40|add:2 eggs="scrambled" 56 four=1|add:3 %}'), ("'simple_unlimited_args_kwargs' received multiple values for keyword argument 'eggs'", '{% load custom %}{% simple_unlimited_args_kwargs 37 eggs="scrambled" eggs="scrambled" %}'), ] for entry in errors: with self.assertRaisesMessage(TemplateSyntaxError, entry[0]): self.engine.from_string(entry[1]) for entry in errors: with self.assertRaisesMessage(TemplateSyntaxError, entry[0]): self.engine.from_string("%s as var %%}" % entry[1][0:-2]) def test_simple_tag_escaping_autoescape_off(self): c = Context({'name': "Jack & Jill"}, autoescape=False) t = self.engine.from_string("{% load custom %}{% escape_naive %}") self.assertEqual(t.render(c), "Hello Jack & Jill!") def test_simple_tag_naive_escaping(self): c = Context({'name': "Jack & Jill"}) t = self.engine.from_string("{% load custom %}{% escape_naive %}") self.assertEqual(t.render(c), "Hello Jack &amp; Jill!") def test_simple_tag_explicit_escaping(self): # Check we don't double escape c = Context({'name': "Jack & Jill"}) t = self.engine.from_string("{% load custom %}{% escape_explicit %}") self.assertEqual(t.render(c), "Hello Jack &amp; Jill!") def test_simple_tag_format_html_escaping(self): # Check we don't double escape c = Context({'name': "Jack & Jill"}) t = self.engine.from_string("{% load custom %}{% escape_format_html %}") self.assertEqual(t.render(c), "Hello Jack &amp; Jill!") def test_simple_tag_registration(self): # The decorators preserve the decorated function's docstring, name, # and attributes. 
self.verify_tag(custom.no_params, 'no_params') self.verify_tag(custom.one_param, 'one_param') self.verify_tag(custom.explicit_no_context, 'explicit_no_context') self.verify_tag(custom.no_params_with_context, 'no_params_with_context') self.verify_tag(custom.params_and_context, 'params_and_context') self.verify_tag(custom.simple_unlimited_args_kwargs, 'simple_unlimited_args_kwargs') self.verify_tag(custom.simple_tag_without_context_parameter, 'simple_tag_without_context_parameter') def test_simple_tag_missing_context(self): # The 'context' parameter must be present when takes_context is True msg = ( "'simple_tag_without_context_parameter' is decorated with " "takes_context=True so it must have a first argument of 'context'" ) with self.assertRaisesMessage(TemplateSyntaxError, msg): self.engine.from_string('{% load custom %}{% simple_tag_without_context_parameter 123 %}') class InclusionTagTests(TagTestCase): def test_inclusion_tags(self): c = Context({'value': 42}) templates = [ ('{% load inclusion %}{% inclusion_no_params %}', 'inclusion_no_params - Expected result\n'), ('{% load inclusion %}{% inclusion_one_param 37 %}', 'inclusion_one_param - Expected result: 37\n'), ('{% load inclusion %}{% inclusion_explicit_no_context 37 %}', 'inclusion_explicit_no_context - Expected result: 37\n'), ('{% load inclusion %}{% inclusion_no_params_with_context %}', 'inclusion_no_params_with_context - Expected result (context value: 42)\n'), ('{% load inclusion %}{% inclusion_params_and_context 37 %}', 'inclusion_params_and_context - Expected result (context value: 42): 37\n'), ('{% load inclusion %}{% inclusion_two_params 37 42 %}', 'inclusion_two_params - Expected result: 37, 42\n'), ( '{% load inclusion %}{% inclusion_one_default 37 %}', 'inclusion_one_default - Expected result: 37, hi\n' ), ('{% load inclusion %}{% inclusion_one_default 37 two="hello" %}', 'inclusion_one_default - Expected result: 37, hello\n'), ('{% load inclusion %}{% inclusion_one_default one=99 two="hello" %}', 'inclusion_one_default - Expected result: 99, hello\n'), ('{% load inclusion %}{% inclusion_one_default 37 42 %}', 'inclusion_one_default - Expected result: 37, 42\n'), ( '{% load inclusion %}{% inclusion_keyword_only_default kwarg=37 %}', 'inclusion_keyword_only_default - Expected result: 37\n', ), ('{% load inclusion %}{% inclusion_unlimited_args 37 %}', 'inclusion_unlimited_args - Expected result: 37, hi\n'), ('{% load inclusion %}{% inclusion_unlimited_args 37 42 56 89 %}', 'inclusion_unlimited_args - Expected result: 37, 42, 56, 89\n'), ('{% load inclusion %}{% inclusion_only_unlimited_args %}', 'inclusion_only_unlimited_args - Expected result: \n'), ('{% load inclusion %}{% inclusion_only_unlimited_args 37 42 56 89 %}', 'inclusion_only_unlimited_args - Expected result: 37, 42, 56, 89\n'), ('{% load inclusion %}{% inclusion_unlimited_args_kwargs 37 40|add:2 56 eggs="scrambled" four=1|add:3 %}', 'inclusion_unlimited_args_kwargs - Expected result: 37, 42, 56 / eggs=scrambled, four=4\n'), ] for entry in templates: t = self.engine.from_string(entry[0]) self.assertEqual(t.render(c), entry[1]) def test_inclusion_tag_errors(self): errors = [ ("'inclusion_one_default' received unexpected keyword argument 'three'", '{% load inclusion %}{% inclusion_one_default 99 two="hello" three="foo" %}'), ("'inclusion_two_params' received too many positional arguments", '{% load inclusion %}{% inclusion_two_params 37 42 56 %}'), ("'inclusion_one_default' received too many positional arguments", '{% load inclusion %}{% inclusion_one_default 37 
42 56 %}'), ("'inclusion_one_default' did not receive value(s) for the argument(s): 'one'", '{% load inclusion %}{% inclusion_one_default %}'), ( "'inclusion_keyword_only_default' received multiple values " "for keyword argument 'kwarg'", '{% load inclusion %}{% inclusion_keyword_only_default ' 'kwarg=37 kwarg=42 %}', ), ("'inclusion_unlimited_args' did not receive value(s) for the argument(s): 'one'", '{% load inclusion %}{% inclusion_unlimited_args %}'), ( "'inclusion_unlimited_args_kwargs' received some positional argument(s) " "after some keyword argument(s)", '{% load inclusion %}{% inclusion_unlimited_args_kwargs 37 40|add:2 eggs="boiled" 56 four=1|add:3 %}', ), ("'inclusion_unlimited_args_kwargs' received multiple values for keyword argument 'eggs'", '{% load inclusion %}{% inclusion_unlimited_args_kwargs 37 eggs="scrambled" eggs="scrambled" %}'), ] for entry in errors: with self.assertRaisesMessage(TemplateSyntaxError, entry[0]): self.engine.from_string(entry[1]) def test_include_tag_missing_context(self): # The 'context' parameter must be present when takes_context is True msg = ( "'inclusion_tag_without_context_parameter' is decorated with " "takes_context=True so it must have a first argument of 'context'" ) with self.assertRaisesMessage(TemplateSyntaxError, msg): self.engine.from_string('{% load inclusion %}{% inclusion_tag_without_context_parameter 123 %}') def test_inclusion_tags_from_template(self): c = Context({'value': 42}) templates = [ ('{% load inclusion %}{% inclusion_no_params_from_template %}', 'inclusion_no_params_from_template - Expected result\n'), ('{% load inclusion %}{% inclusion_one_param_from_template 37 %}', 'inclusion_one_param_from_template - Expected result: 37\n'), ('{% load inclusion %}{% inclusion_explicit_no_context_from_template 37 %}', 'inclusion_explicit_no_context_from_template - Expected result: 37\n'), ('{% load inclusion %}{% inclusion_no_params_with_context_from_template %}', 'inclusion_no_params_with_context_from_template - Expected result (context value: 42)\n'), ('{% load inclusion %}{% inclusion_params_and_context_from_template 37 %}', 'inclusion_params_and_context_from_template - Expected result (context value: 42): 37\n'), ('{% load inclusion %}{% inclusion_two_params_from_template 37 42 %}', 'inclusion_two_params_from_template - Expected result: 37, 42\n'), ('{% load inclusion %}{% inclusion_one_default_from_template 37 %}', 'inclusion_one_default_from_template - Expected result: 37, hi\n'), ('{% load inclusion %}{% inclusion_one_default_from_template 37 42 %}', 'inclusion_one_default_from_template - Expected result: 37, 42\n'), ('{% load inclusion %}{% inclusion_unlimited_args_from_template 37 %}', 'inclusion_unlimited_args_from_template - Expected result: 37, hi\n'), ('{% load inclusion %}{% inclusion_unlimited_args_from_template 37 42 56 89 %}', 'inclusion_unlimited_args_from_template - Expected result: 37, 42, 56, 89\n'), ('{% load inclusion %}{% inclusion_only_unlimited_args_from_template %}', 'inclusion_only_unlimited_args_from_template - Expected result: \n'), ('{% load inclusion %}{% inclusion_only_unlimited_args_from_template 37 42 56 89 %}', 'inclusion_only_unlimited_args_from_template - Expected result: 37, 42, 56, 89\n'), ] for entry in templates: t = self.engine.from_string(entry[0]) self.assertEqual(t.render(c), entry[1]) def test_inclusion_tag_registration(self): # The decorators preserve the decorated function's docstring, name, # and attributes. 
self.verify_tag(inclusion.inclusion_no_params, 'inclusion_no_params') self.verify_tag(inclusion.inclusion_one_param, 'inclusion_one_param') self.verify_tag(inclusion.inclusion_explicit_no_context, 'inclusion_explicit_no_context') self.verify_tag(inclusion.inclusion_no_params_with_context, 'inclusion_no_params_with_context') self.verify_tag(inclusion.inclusion_params_and_context, 'inclusion_params_and_context') self.verify_tag(inclusion.inclusion_two_params, 'inclusion_two_params') self.verify_tag(inclusion.inclusion_one_default, 'inclusion_one_default') self.verify_tag(inclusion.inclusion_unlimited_args, 'inclusion_unlimited_args') self.verify_tag(inclusion.inclusion_only_unlimited_args, 'inclusion_only_unlimited_args') self.verify_tag(inclusion.inclusion_tag_without_context_parameter, 'inclusion_tag_without_context_parameter') self.verify_tag(inclusion.inclusion_tag_use_l10n, 'inclusion_tag_use_l10n') self.verify_tag(inclusion.inclusion_unlimited_args_kwargs, 'inclusion_unlimited_args_kwargs') def test_15070_use_l10n(self): """ Inclusion tag passes down `use_l10n` of context to the Context of the included/rendered template as well. """ c = Context({}) t = self.engine.from_string('{% load inclusion %}{% inclusion_tag_use_l10n %}') self.assertEqual(t.render(c).strip(), 'None') c.use_l10n = True self.assertEqual(t.render(c).strip(), 'True') def test_no_render_side_effect(self): """ #23441 -- InclusionNode shouldn't modify its nodelist at render time. """ engine = Engine(app_dirs=True, libraries=LIBRARIES) template = engine.from_string('{% load inclusion %}{% inclusion_no_params %}') count = template.nodelist.get_nodes_by_type(Node) template.render(Context({})) self.assertEqual(template.nodelist.get_nodes_by_type(Node), count) def test_render_context_is_cleared(self): """ #24555 -- InclusionNode should push and pop the render_context stack when rendering. Otherwise, leftover values such as blocks from extending can interfere with subsequent rendering. """ engine = Engine(app_dirs=True, libraries=LIBRARIES) template = engine.from_string('{% load inclusion %}{% inclusion_extends1 %}{% inclusion_extends2 %}') self.assertEqual(template.render(Context({})).strip(), 'one\ntwo') class TemplateTagLoadingTests(SimpleTestCase): @classmethod def setUpClass(cls): cls.egg_dir = os.path.join(ROOT, 'eggs') super().setUpClass() def test_load_error(self): msg = ( "Invalid template library specified. ImportError raised when " "trying to load 'template_tests.broken_tag': cannot import name " "'Xtemplate'" ) with self.assertRaisesMessage(InvalidTemplateLibrary, msg): Engine(libraries={'broken_tag': 'template_tests.broken_tag'}) def test_load_error_egg(self): egg_name = '%s/tagsegg.egg' % self.egg_dir msg = ( "Invalid template library specified. ImportError raised when " "trying to load 'tagsegg.templatetags.broken_egg': cannot " "import name 'Xtemplate'" ) with extend_sys_path(egg_name): with self.assertRaisesMessage(InvalidTemplateLibrary, msg): Engine(libraries={'broken_egg': 'tagsegg.templatetags.broken_egg'}) def test_load_working_egg(self): ttext = "{% load working_egg %}" egg_name = '%s/tagsegg.egg' % self.egg_dir with extend_sys_path(egg_name): engine = Engine(libraries={ 'working_egg': 'tagsegg.templatetags.working_egg', }) engine.from_string(ttext) def test_load_annotated_function(self): Engine(libraries={ 'annotated_tag_function': 'template_tests.annotated_tag_function', })
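# A minimal sketch (not part of the test suite above; tag names here are
# hypothetical) of the registration pattern these tests exercise: with
# takes_context=True, the decorated function must name ``context`` as its
# first parameter, otherwise Django raises TemplateSyntaxError when the tag
# is parsed.
from django import template

register = template.Library()


@register.simple_tag(takes_context=True)
def context_value(context, key):
    # The current rendering Context is injected automatically as the first
    # argument; the remaining arguments come from the template invocation.
    return context.get(key, '')


@register.inclusion_tag('results.html', takes_context=True)
def show_results(context):
    # Inclusion tags return a dict that becomes the context for the named
    # template ('results.html' is a placeholder).
    return {'value': context.get('value')}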
from django.core.checks import Error from django.core.checks.translation import ( check_language_settings_consistent, check_setting_language_code, check_setting_languages, check_setting_languages_bidi, ) from django.test import SimpleTestCase, override_settings class TranslationCheckTests(SimpleTestCase): def setUp(self): self.valid_tags = ( 'en', # language 'mas', # language 'sgn-ase', # language+extlang 'fr-CA', # language+region 'es-419', # language+region 'zh-Hans', # language+script 'ca-ES-valencia', # language+region+variant # FIXME: The following should be invalid: 'sr@latin', # language+script ) self.invalid_tags = ( None, # invalid type: None. 123, # invalid type: int. b'en', # invalid type: bytes. 'eü', # non-latin characters. 'en_US', # locale format. 'en--us', # empty subtag. '-en', # leading separator. 'en-', # trailing separator. 'en-US.UTF-8', # language tag w/ locale encoding. 'en_US.UTF-8', # locale format - language w/ region and encoding. 'ca_ES@valencia', # locale format - language w/ region and variant. # FIXME: The following should be invalid: # 'sr@latin', # locale instead of language tag. ) def test_valid_language_code(self): for tag in self.valid_tags: with self.subTest(tag), self.settings(LANGUAGE_CODE=tag): self.assertEqual(check_setting_language_code(None), []) def test_invalid_language_code(self): msg = 'You have provided an invalid value for the LANGUAGE_CODE setting: %r.' for tag in self.invalid_tags: with self.subTest(tag), self.settings(LANGUAGE_CODE=tag): self.assertEqual(check_setting_language_code(None), [ Error(msg % tag, id='translation.E001'), ]) def test_valid_languages(self): for tag in self.valid_tags: with self.subTest(tag), self.settings(LANGUAGES=[(tag, tag)]): self.assertEqual(check_setting_languages(None), []) def test_invalid_languages(self): msg = 'You have provided an invalid language code in the LANGUAGES setting: %r.' for tag in self.invalid_tags: with self.subTest(tag), self.settings(LANGUAGES=[(tag, tag)]): self.assertEqual(check_setting_languages(None), [ Error(msg % tag, id='translation.E002'), ]) def test_valid_languages_bidi(self): for tag in self.valid_tags: with self.subTest(tag), self.settings(LANGUAGES_BIDI=[tag]): self.assertEqual(check_setting_languages_bidi(None), []) def test_invalid_languages_bidi(self): msg = 'You have provided an invalid language code in the LANGUAGES_BIDI setting: %r.' for tag in self.invalid_tags: with self.subTest(tag), self.settings(LANGUAGES_BIDI=[tag]): self.assertEqual(check_setting_languages_bidi(None), [ Error(msg % tag, id='translation.E003'), ]) @override_settings(USE_I18N=True, LANGUAGES=[('en', 'English')]) def test_inconsistent_language_settings(self): msg = ( 'You have provided a value for the LANGUAGE_CODE setting that is ' 'not in the LANGUAGES setting.' ) for tag in ['fr', 'fr-CA', 'fr-357']: with self.subTest(tag), self.settings(LANGUAGE_CODE=tag): self.assertEqual(check_language_settings_consistent(None), [ Error(msg, id='translation.E004'), ]) @override_settings( USE_I18N=True, LANGUAGES=[ ('de', 'German'), ('es', 'Spanish'), ('fr', 'French'), ('ca', 'Catalan'), ], ) def test_valid_variant_consistent_language_settings(self): tests = [ # language + region. 'fr-CA', 'es-419', 'de-at', # language + region + variant. 'ca-ES-valencia', ] for tag in tests: with self.subTest(tag), self.settings(LANGUAGE_CODE=tag): self.assertEqual(check_language_settings_consistent(None), [])
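# A minimal sketch of invoking the checks above outside the test runner,
# assuming Django settings are already configured. Each check function takes
# an app_configs argument (None here) and returns a list of
# django.core.checks.Error instances, which is empty for valid settings.
from django.core.checks.translation import check_setting_language_code
from django.test import override_settings

with override_settings(LANGUAGE_CODE='en-US'):
    assert check_setting_language_code(None) == []

with override_settings(LANGUAGE_CODE='en_US'):  # locale format, not a language tag
    assert check_setting_language_code(None) != []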
import unittest import uuid from django.core.checks import Error, Warning as DjangoWarning from django.db import connection, models from django.test import SimpleTestCase, TestCase, skipIfDBFeature from django.test.utils import isolate_apps, override_settings from django.utils.functional import lazy from django.utils.timezone import now from django.utils.translation import gettext_lazy as _ @isolate_apps('invalid_models_tests') class AutoFieldTests(SimpleTestCase): def test_valid_case(self): class Model(models.Model): id = models.AutoField(primary_key=True) field = Model._meta.get_field('id') self.assertEqual(field.check(), []) def test_primary_key(self): # primary_key must be True. Refs #12467. class Model(models.Model): field = models.AutoField(primary_key=False) # Prevent Django from autocreating `id` AutoField, which would # result in an error, because a model must have exactly one # AutoField. another = models.IntegerField(primary_key=True) field = Model._meta.get_field('field') self.assertEqual(field.check(), [ Error( 'AutoFields must set primary_key=True.', obj=field, id='fields.E100', ), ]) def test_max_length_warning(self): class Model(models.Model): auto = models.AutoField(primary_key=True, max_length=2) field = Model._meta.get_field('auto') self.assertEqual(field.check(), [ DjangoWarning( "'max_length' is ignored when used with %s." % field.__class__.__name__, hint="Remove 'max_length' from field", obj=field, id='fields.W122', ), ]) @isolate_apps('invalid_models_tests') class BinaryFieldTests(SimpleTestCase): def test_valid_default_value(self): class Model(models.Model): field1 = models.BinaryField(default=b'test') field2 = models.BinaryField(default=None) for field_name in ('field1', 'field2'): field = Model._meta.get_field(field_name) self.assertEqual(field.check(), []) def test_str_default_value(self): class Model(models.Model): field = models.BinaryField(default='test') field = Model._meta.get_field('field') self.assertEqual(field.check(), [ Error( "BinaryField's default cannot be a string. 
Use bytes content " "instead.", obj=field, id='fields.E170', ), ]) @isolate_apps('invalid_models_tests') class CharFieldTests(SimpleTestCase): def test_valid_field(self): class Model(models.Model): field = models.CharField( max_length=255, choices=[ ('1', 'item1'), ('2', 'item2'), ], db_index=True, ) field = Model._meta.get_field('field') self.assertEqual(field.check(), []) def test_missing_max_length(self): class Model(models.Model): field = models.CharField() field = Model._meta.get_field('field') self.assertEqual(field.check(), [ Error( "CharFields must define a 'max_length' attribute.", obj=field, id='fields.E120', ), ]) def test_negative_max_length(self): class Model(models.Model): field = models.CharField(max_length=-1) field = Model._meta.get_field('field') self.assertEqual(field.check(), [ Error( "'max_length' must be a positive integer.", obj=field, id='fields.E121', ), ]) def test_bad_max_length_value(self): class Model(models.Model): field = models.CharField(max_length="bad") field = Model._meta.get_field('field') self.assertEqual(field.check(), [ Error( "'max_length' must be a positive integer.", obj=field, id='fields.E121', ), ]) def test_str_max_length_value(self): class Model(models.Model): field = models.CharField(max_length='20') field = Model._meta.get_field('field') self.assertEqual(field.check(), [ Error( "'max_length' must be a positive integer.", obj=field, id='fields.E121', ), ]) def test_str_max_length_type(self): class Model(models.Model): field = models.CharField(max_length=True) field = Model._meta.get_field('field') self.assertEqual(field.check(), [ Error( "'max_length' must be a positive integer.", obj=field, id='fields.E121' ), ]) def test_non_iterable_choices(self): class Model(models.Model): field = models.CharField(max_length=10, choices='bad') field = Model._meta.get_field('field') self.assertEqual(field.check(), [ Error( "'choices' must be an iterable (e.g., a list or tuple).", obj=field, id='fields.E004', ), ]) def test_non_iterable_choices_two_letters(self): """Two letters isn't a valid choice pair.""" class Model(models.Model): field = models.CharField(max_length=10, choices=['ab']) field = Model._meta.get_field('field') self.assertEqual(field.check(), [ Error( "'choices' must be an iterable containing (actual value, " "human readable name) tuples.", obj=field, id='fields.E005', ), ]) def test_iterable_of_iterable_choices(self): class ThingItem: def __init__(self, value, display): self.value = value self.display = display def __iter__(self): return iter((self.value, self.display)) def __len__(self): return 2 class Things: def __iter__(self): return iter((ThingItem(1, 2), ThingItem(3, 4))) class ThingWithIterableChoices(models.Model): thing = models.CharField(max_length=100, blank=True, choices=Things()) self.assertEqual(ThingWithIterableChoices._meta.get_field('thing').check(), []) def test_choices_containing_non_pairs(self): class Model(models.Model): field = models.CharField(max_length=10, choices=[(1, 2, 3), (1, 2, 3)]) class Model2(models.Model): field = models.IntegerField(choices=[0]) for model in (Model, Model2): with self.subTest(model.__name__): field = model._meta.get_field('field') self.assertEqual(field.check(), [ Error( "'choices' must be an iterable containing (actual " "value, human readable name) tuples.", obj=field, id='fields.E005', ), ]) def test_choices_containing_lazy(self): class Model(models.Model): field = models.CharField(max_length=10, choices=[['1', _('1')], ['2', _('2')]]) 
self.assertEqual(Model._meta.get_field('field').check(), []) def test_lazy_choices(self): class Model(models.Model): field = models.CharField(max_length=10, choices=lazy(lambda: [[1, '1'], [2, '2']], tuple)()) self.assertEqual(Model._meta.get_field('field').check(), []) def test_choices_named_group(self): class Model(models.Model): field = models.CharField( max_length=10, choices=[ ['knights', [['L', 'Lancelot'], ['G', 'Galahad']]], ['wizards', [['T', 'Tim the Enchanter']]], ['R', 'Random character'], ], ) self.assertEqual(Model._meta.get_field('field').check(), []) def test_choices_named_group_non_pairs(self): class Model(models.Model): field = models.CharField( max_length=10, choices=[['knights', [['L', 'Lancelot', 'Du Lac']]]], ) field = Model._meta.get_field('field') self.assertEqual(field.check(), [ Error( "'choices' must be an iterable containing (actual value, " "human readable name) tuples.", obj=field, id='fields.E005', ), ]) def test_choices_named_group_bad_structure(self): class Model(models.Model): field = models.CharField( max_length=10, choices=[ ['knights', [ ['Noble', [['G', 'Galahad']]], ['Combative', [['L', 'Lancelot']]], ]], ], ) field = Model._meta.get_field('field') self.assertEqual(field.check(), [ Error( "'choices' must be an iterable containing (actual value, " "human readable name) tuples.", obj=field, id='fields.E005', ), ]) def test_choices_named_group_lazy(self): class Model(models.Model): field = models.CharField( max_length=10, choices=[ [_('knights'), [['L', _('Lancelot')], ['G', _('Galahad')]]], ['R', _('Random character')], ], ) self.assertEqual(Model._meta.get_field('field').check(), []) def test_choices_in_max_length(self): class Model(models.Model): field = models.CharField( max_length=2, choices=[ ('ABC', 'Value Too Long!'), ('OK', 'Good') ], ) group = models.CharField( max_length=2, choices=[ ('Nested', [('OK', 'Good'), ('Longer', 'Longer')]), ('Grouped', [('Bad', 'Bad')]), ], ) for name, choice_max_length in (('field', 3), ('group', 6)): with self.subTest(name): field = Model._meta.get_field(name) self.assertEqual(field.check(), [ Error( "'max_length' is too small to fit the longest value " "in 'choices' (%d characters)." % choice_max_length, obj=field, id='fields.E009', ), ]) def test_bad_db_index_value(self): class Model(models.Model): field = models.CharField(max_length=10, db_index='bad') field = Model._meta.get_field('field') self.assertEqual(field.check(), [ Error( "'db_index' must be None, True or False.", obj=field, id='fields.E006', ), ]) def test_bad_validators(self): class Model(models.Model): field = models.CharField(max_length=10, validators=[True]) field = Model._meta.get_field('field') self.assertEqual(field.check(), [ Error( "All 'validators' must be callable.", hint=( "validators[0] (True) isn't a function or instance of a " "validator class." 
), obj=field, id='fields.E008', ), ]) @unittest.skipUnless(connection.vendor == 'mysql', "Test valid only for MySQL") def test_too_long_char_field_under_mysql(self): from django.db.backends.mysql.validation import DatabaseValidation class Model(models.Model): field = models.CharField(unique=True, max_length=256) field = Model._meta.get_field('field') validator = DatabaseValidation(connection=connection) self.assertEqual(validator.check_field(field), [ Error( 'MySQL does not allow unique CharFields to have a max_length > 255.', obj=field, id='mysql.E001', ) ]) @isolate_apps('invalid_models_tests') class DateFieldTests(SimpleTestCase): maxDiff = None def test_auto_now_and_auto_now_add_raise_error(self): class Model(models.Model): field0 = models.DateTimeField(auto_now=True, auto_now_add=True, default=now) field1 = models.DateTimeField(auto_now=True, auto_now_add=False, default=now) field2 = models.DateTimeField(auto_now=False, auto_now_add=True, default=now) field3 = models.DateTimeField(auto_now=True, auto_now_add=True, default=None) expected = [] checks = [] for i in range(4): field = Model._meta.get_field('field%d' % i) expected.append(Error( "The options auto_now, auto_now_add, and default " "are mutually exclusive. Only one of these options " "may be present.", obj=field, id='fields.E160', )) checks.extend(field.check()) self.assertEqual(checks, expected) def test_fix_default_value(self): class Model(models.Model): field_dt = models.DateField(default=now()) field_d = models.DateField(default=now().date()) field_now = models.DateField(default=now) field_dt = Model._meta.get_field('field_dt') field_d = Model._meta.get_field('field_d') field_now = Model._meta.get_field('field_now') errors = field_dt.check() errors.extend(field_d.check()) errors.extend(field_now.check()) # doesn't raise a warning self.assertEqual(errors, [ DjangoWarning( 'Fixed default value provided.', hint='It seems you set a fixed date / time / datetime ' 'value as default for this field. This may not be ' 'what you want. If you want to have the current date ' 'as default, use `django.utils.timezone.now`', obj=field_dt, id='fields.W161', ), DjangoWarning( 'Fixed default value provided.', hint='It seems you set a fixed date / time / datetime ' 'value as default for this field. This may not be ' 'what you want. If you want to have the current date ' 'as default, use `django.utils.timezone.now`', obj=field_d, id='fields.W161', ) ]) @override_settings(USE_TZ=True) def test_fix_default_value_tz(self): self.test_fix_default_value() @isolate_apps('invalid_models_tests') class DateTimeFieldTests(SimpleTestCase): maxDiff = None def test_fix_default_value(self): class Model(models.Model): field_dt = models.DateTimeField(default=now()) field_d = models.DateTimeField(default=now().date()) field_now = models.DateTimeField(default=now) field_dt = Model._meta.get_field('field_dt') field_d = Model._meta.get_field('field_d') field_now = Model._meta.get_field('field_now') errors = field_dt.check() errors.extend(field_d.check()) errors.extend(field_now.check()) # doesn't raise a warning self.assertEqual(errors, [ DjangoWarning( 'Fixed default value provided.', hint='It seems you set a fixed date / time / datetime ' 'value as default for this field. This may not be ' 'what you want. If you want to have the current date ' 'as default, use `django.utils.timezone.now`', obj=field_dt, id='fields.W161', ), DjangoWarning( 'Fixed default value provided.', hint='It seems you set a fixed date / time / datetime ' 'value as default for this field. 
This may not be ' 'what you want. If you want to have the current date ' 'as default, use `django.utils.timezone.now`', obj=field_d, id='fields.W161', ) ]) @override_settings(USE_TZ=True) def test_fix_default_value_tz(self): self.test_fix_default_value() @isolate_apps('invalid_models_tests') class DecimalFieldTests(SimpleTestCase): def test_required_attributes(self): class Model(models.Model): field = models.DecimalField() field = Model._meta.get_field('field') self.assertEqual(field.check(), [ Error( "DecimalFields must define a 'decimal_places' attribute.", obj=field, id='fields.E130', ), Error( "DecimalFields must define a 'max_digits' attribute.", obj=field, id='fields.E132', ), ]) def test_negative_max_digits_and_decimal_places(self): class Model(models.Model): field = models.DecimalField(max_digits=-1, decimal_places=-1) field = Model._meta.get_field('field') self.assertEqual(field.check(), [ Error( "'decimal_places' must be a non-negative integer.", obj=field, id='fields.E131', ), Error( "'max_digits' must be a positive integer.", obj=field, id='fields.E133', ), ]) def test_bad_values_of_max_digits_and_decimal_places(self): class Model(models.Model): field = models.DecimalField(max_digits="bad", decimal_places="bad") field = Model._meta.get_field('field') self.assertEqual(field.check(), [ Error( "'decimal_places' must be a non-negative integer.", obj=field, id='fields.E131', ), Error( "'max_digits' must be a positive integer.", obj=field, id='fields.E133', ), ]) def test_decimal_places_greater_than_max_digits(self): class Model(models.Model): field = models.DecimalField(max_digits=9, decimal_places=10) field = Model._meta.get_field('field') self.assertEqual(field.check(), [ Error( "'max_digits' must be greater or equal to 'decimal_places'.", obj=field, id='fields.E134', ), ]) def test_valid_field(self): class Model(models.Model): field = models.DecimalField(max_digits=10, decimal_places=10) field = Model._meta.get_field('field') self.assertEqual(field.check(), []) @isolate_apps('invalid_models_tests') class FileFieldTests(SimpleTestCase): def test_valid_default_case(self): class Model(models.Model): field = models.FileField() self.assertEqual(Model._meta.get_field('field').check(), []) def test_valid_case(self): class Model(models.Model): field = models.FileField(upload_to='somewhere') field = Model._meta.get_field('field') self.assertEqual(field.check(), []) def test_primary_key(self): class Model(models.Model): field = models.FileField(primary_key=False, upload_to='somewhere') field = Model._meta.get_field('field') self.assertEqual(field.check(), [ Error( "'primary_key' is not a valid argument for a FileField.", obj=field, id='fields.E201', ) ]) def test_upload_to_starts_with_slash(self): class Model(models.Model): field = models.FileField(upload_to='/somewhere') field = Model._meta.get_field('field') self.assertEqual(field.check(), [ Error( "FileField's 'upload_to' argument must be a relative path, not " "an absolute path.", obj=field, id='fields.E202', hint='Remove the leading slash.', ) ]) def test_upload_to_callable_not_checked(self): def callable(instance, filename): return '/' + filename class Model(models.Model): field = models.FileField(upload_to=callable) field = Model._meta.get_field('field') self.assertEqual(field.check(), []) @isolate_apps('invalid_models_tests') class FilePathFieldTests(SimpleTestCase): def test_forbidden_files_and_folders(self): class Model(models.Model): field = models.FilePathField(allow_files=False, allow_folders=False) field = 
Model._meta.get_field('field') self.assertEqual(field.check(), [ Error( "FilePathFields must have either 'allow_files' or 'allow_folders' set to True.", obj=field, id='fields.E140', ), ]) @isolate_apps('invalid_models_tests') class GenericIPAddressFieldTests(SimpleTestCase): def test_non_nullable_blank(self): class Model(models.Model): field = models.GenericIPAddressField(null=False, blank=True) field = Model._meta.get_field('field') self.assertEqual(field.check(), [ Error( ('GenericIPAddressFields cannot have blank=True if null=False, ' 'as blank values are stored as nulls.'), obj=field, id='fields.E150', ), ]) @isolate_apps('invalid_models_tests') class ImageFieldTests(SimpleTestCase): def test_pillow_installed(self): try: from PIL import Image # NOQA except ImportError: pillow_installed = False else: pillow_installed = True class Model(models.Model): field = models.ImageField(upload_to='somewhere') field = Model._meta.get_field('field') errors = field.check() expected = [] if pillow_installed else [ Error( 'Cannot use ImageField because Pillow is not installed.', hint=('Get Pillow at https://pypi.org/project/Pillow/ ' 'or run command "python -m pip install Pillow".'), obj=field, id='fields.E210', ), ] self.assertEqual(errors, expected) @isolate_apps('invalid_models_tests') class IntegerFieldTests(SimpleTestCase): def test_max_length_warning(self): class Model(models.Model): integer = models.IntegerField(max_length=2) biginteger = models.BigIntegerField(max_length=2) smallinteger = models.SmallIntegerField(max_length=2) positiveinteger = models.PositiveIntegerField(max_length=2) positivebiginteger = models.PositiveBigIntegerField(max_length=2) positivesmallinteger = models.PositiveSmallIntegerField(max_length=2) for field in Model._meta.get_fields(): if field.auto_created: continue with self.subTest(name=field.name): self.assertEqual(field.check(), [ DjangoWarning( "'max_length' is ignored when used with %s." % field.__class__.__name__, hint="Remove 'max_length' from field", obj=field, id='fields.W122', ) ]) @isolate_apps('invalid_models_tests') class TimeFieldTests(SimpleTestCase): maxDiff = None def test_fix_default_value(self): class Model(models.Model): field_dt = models.TimeField(default=now()) field_t = models.TimeField(default=now().time()) field_now = models.DateField(default=now) field_dt = Model._meta.get_field('field_dt') field_t = Model._meta.get_field('field_t') field_now = Model._meta.get_field('field_now') errors = field_dt.check() errors.extend(field_t.check()) errors.extend(field_now.check()) # doesn't raise a warning self.assertEqual(errors, [ DjangoWarning( 'Fixed default value provided.', hint='It seems you set a fixed date / time / datetime ' 'value as default for this field. This may not be ' 'what you want. If you want to have the current date ' 'as default, use `django.utils.timezone.now`', obj=field_dt, id='fields.W161', ), DjangoWarning( 'Fixed default value provided.', hint='It seems you set a fixed date / time / datetime ' 'value as default for this field. This may not be ' 'what you want. 
If you want to have the current date ' 'as default, use `django.utils.timezone.now`', obj=field_t, id='fields.W161', ) ]) @override_settings(USE_TZ=True) def test_fix_default_value_tz(self): self.test_fix_default_value() @isolate_apps('invalid_models_tests') class TextFieldTests(TestCase): @skipIfDBFeature('supports_index_on_text_field') def test_max_length_warning(self): class Model(models.Model): value = models.TextField(db_index=True) field = Model._meta.get_field('value') field_type = field.db_type(connection) self.assertEqual(field.check(), [ DjangoWarning( '%s does not support a database index on %s columns.' % (connection.display_name, field_type), hint=( "An index won't be created. Silence this warning if you " "don't care about it." ), obj=field, id='fields.W162', ) ]) @isolate_apps('invalid_models_tests') class UUIDFieldTests(TestCase): def test_choices_named_group(self): class Model(models.Model): field = models.UUIDField( choices=[ ['knights', [ [uuid.UUID('5c859437-d061-4847-b3f7-e6b78852f8c8'), 'Lancelot'], [uuid.UUID('c7853ec1-2ea3-4359-b02d-b54e8f1bcee2'), 'Galahad'], ]], [uuid.UUID('25d405be-4895-4d50-9b2e-d6695359ce47'), 'Other'], ], ) self.assertEqual(Model._meta.get_field('field').check(), [])
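# A rough sketch of how the per-field check() calls exercised above relate to
# the system-check framework: Model.check() aggregates field.check() over the
# model's local fields, which is what ``manage.py check`` ultimately reports.
# The helper name is hypothetical.
def collect_field_errors(model):
    errors = []
    for field in model._meta.local_fields:
        # Each field contributes zero or more Error/Warning messages.
        errors.extend(field.check())
    return errors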
import os
import pickle
import sys
import tempfile
import unittest
from pathlib import Path

from django.core.files import File, temp
from django.core.files.base import ContentFile
from django.core.files.uploadedfile import TemporaryUploadedFile
from django.db.utils import IntegrityError
from django.test import TestCase, override_settings

from .models import Document


class FileFieldTests(TestCase):

    def test_clearable(self):
        """
        FileField.save_form_data() will clear its instance attribute value if
        passed False.
        """
        d = Document(myfile='something.txt')
        self.assertEqual(d.myfile, 'something.txt')
        field = d._meta.get_field('myfile')
        field.save_form_data(d, False)
        self.assertEqual(d.myfile, '')

    def test_unchanged(self):
        """
        FileField.save_form_data() considers None to mean "no change" rather
        than "clear".
        """
        d = Document(myfile='something.txt')
        self.assertEqual(d.myfile, 'something.txt')
        field = d._meta.get_field('myfile')
        field.save_form_data(d, None)
        self.assertEqual(d.myfile, 'something.txt')

    def test_changed(self):
        """
        FileField.save_form_data(), if passed a truthy value, updates its
        instance attribute.
        """
        d = Document(myfile='something.txt')
        self.assertEqual(d.myfile, 'something.txt')
        field = d._meta.get_field('myfile')
        field.save_form_data(d, 'else.txt')
        self.assertEqual(d.myfile, 'else.txt')

    def test_delete_when_file_unset(self):
        """
        Calling delete on an unset FileField should not call the file
        deletion process, but fail silently (#20660).
        """
        d = Document()
        d.myfile.delete()

    def test_refresh_from_db(self):
        d = Document.objects.create(myfile='something.txt')
        d.refresh_from_db()
        self.assertIs(d.myfile.instance, d)

    def test_defer(self):
        Document.objects.create(myfile='something.txt')
        self.assertEqual(Document.objects.defer('myfile')[0].myfile, 'something.txt')

    def test_unique_when_same_filename(self):
        """
        A FileField with unique=True shouldn't allow two instances with the
        same name to be saved.
        """
        Document.objects.create(myfile='something.txt')
        with self.assertRaises(IntegrityError):
            Document.objects.create(myfile='something.txt')

    @unittest.skipIf(sys.platform == 'win32', "Windows doesn't support moving open files.")
    # The file's source and destination must be on the same filesystem.
    @override_settings(MEDIA_ROOT=temp.gettempdir())
    def test_move_temporary_file(self):
        """
        The temporary uploaded file is moved rather than copied to the
        destination.
        """
        with TemporaryUploadedFile('something.txt', 'text/plain', 0, 'UTF-8') as tmp_file:
            tmp_file_path = tmp_file.temporary_file_path()
            Document.objects.create(myfile=tmp_file)
            self.assertFalse(os.path.exists(tmp_file_path), 'Temporary file still exists')

    def test_open_returns_self(self):
        """
        FileField.open() returns self so it can be used as a context manager.
        """
        d = Document.objects.create(myfile='something.txt')
        # Replace the FileField's file with an in-memory ContentFile, so that
        # open() doesn't write to disk.
        d.myfile.file = ContentFile(b'', name='bla')
        self.assertEqual(d.myfile, d.myfile.open())

    def test_media_root_pathlib(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            with override_settings(MEDIA_ROOT=Path(tmp_dir)):
                with TemporaryUploadedFile('foo.txt', 'text/plain', 1, 'utf-8') as tmp_file:
                    Document.objects.create(myfile=tmp_file)
                    self.assertTrue(os.path.exists(os.path.join(tmp_dir, 'unused', 'foo.txt')))

    def test_pickle(self):
        with open(__file__, 'rb') as fp:
            file1 = File(fp, name='test_file.py')
            document = Document(myfile='test_file.py')
            document.myfile.save('test_file.py', file1)
            try:
                dump = pickle.dumps(document)
                loaded_document = pickle.loads(dump)
                self.assertEqual(document.myfile, loaded_document.myfile)
                self.assertEqual(document.myfile.url, loaded_document.myfile.url)
                self.assertEqual(
                    document.myfile.storage, loaded_document.myfile.storage,
                )
                self.assertEqual(
                    document.myfile.instance, loaded_document.myfile.instance,
                )
                self.assertEqual(document.myfile.field, loaded_document.myfile.field)
                myfile_dump = pickle.dumps(document.myfile)
                loaded_myfile = pickle.loads(myfile_dump)
                self.assertEqual(document.myfile, loaded_myfile)
                self.assertEqual(document.myfile.url, loaded_myfile.url)
                self.assertEqual(document.myfile.storage, loaded_myfile.storage)
                self.assertEqual(document.myfile.instance, loaded_myfile.instance)
                self.assertEqual(document.myfile.field, loaded_myfile.field)
            finally:
                document.myfile.delete()
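# For reference, a minimal sketch of the Document model these tests assume
# (the real definition lives in this app's models.py). unique=True is implied
# by test_unique_when_same_filename, and upload_to='unused' by the path
# asserted in test_media_root_pathlib.
from django.db import models


class Document(models.Model):
    myfile = models.FileField(upload_to='unused', unique=True)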
import os import shutil from unittest import skipIf from django.core.exceptions import ImproperlyConfigured from django.core.files import File from django.core.files.images import ImageFile from django.test import TestCase from django.test.testcases import SerializeMixin try: from .models import Image except ImproperlyConfigured: Image = None if Image: from .models import ( Person, PersonWithHeight, PersonWithHeightAndWidth, PersonDimensionsFirst, PersonTwoImages, TestImageFieldFile, ) from .models import temp_storage_dir else: # Pillow not available, create dummy classes (tests will be skipped anyway) class Person: pass PersonWithHeight = PersonWithHeightAndWidth = PersonDimensionsFirst = Person PersonTwoImages = Person class ImageFieldTestMixin(SerializeMixin): """ Mixin class to provide common functionality to ImageField test classes. """ lockfile = __file__ # Person model to use for tests. PersonModel = PersonWithHeightAndWidth # File class to use for file instances. File = ImageFile def setUp(self): """ Creates a pristine temp directory (or deletes and recreates if it already exists) that the model uses as its storage directory. Sets up two ImageFile instances for use in tests. """ if os.path.exists(temp_storage_dir): shutil.rmtree(temp_storage_dir) os.mkdir(temp_storage_dir) file_path1 = os.path.join(os.path.dirname(__file__), '4x8.png') self.file1 = self.File(open(file_path1, 'rb'), name='4x8.png') file_path2 = os.path.join(os.path.dirname(__file__), '8x4.png') self.file2 = self.File(open(file_path2, 'rb'), name='8x4.png') def tearDown(self): """ Removes temp directory and all its contents. """ self.file1.close() self.file2.close() shutil.rmtree(temp_storage_dir) def check_dimensions(self, instance, width, height, field_name='mugshot'): """ Asserts that the given width and height values match both the field's height and width attributes and the height and width fields (if defined) the image field is caching to. Note, this method will check for dimension fields named by adding "_width" or "_height" to the name of the ImageField. So, the models used in these tests must have their fields named accordingly. By default, we check the field named "mugshot", but this can be specified by passing the field_name parameter. """ field = getattr(instance, field_name) # Check height/width attributes of field. if width is None and height is None: with self.assertRaises(ValueError): getattr(field, 'width') with self.assertRaises(ValueError): getattr(field, 'height') else: self.assertEqual(field.width, width) self.assertEqual(field.height, height) # Check height/width fields of model, if defined. width_field_name = field_name + '_width' if hasattr(instance, width_field_name): self.assertEqual(getattr(instance, width_field_name), width) height_field_name = field_name + '_height' if hasattr(instance, height_field_name): self.assertEqual(getattr(instance, height_field_name), height) @skipIf(Image is None, "Pillow is required to test ImageField") class ImageFieldTests(ImageFieldTestMixin, TestCase): """ Tests for ImageField that don't need to be run with each of the different test model classes. """ def test_equal_notequal_hash(self): """ Bug #9786: Ensure '==' and '!=' work correctly. Bug #9508: make sure hash() works as expected (equal items must hash to the same value). """ # Create two Persons with different mugshots. 
        p1 = self.PersonModel(name="Joe")
        p1.mugshot.save("mug", self.file1)
        p2 = self.PersonModel(name="Bob")
        p2.mugshot.save("mug", self.file2)
        self.assertIs(p1.mugshot == p2.mugshot, False)
        self.assertIs(p1.mugshot != p2.mugshot, True)

        # Test again with an instance fetched from the db.
        p1_db = self.PersonModel.objects.get(name="Joe")
        self.assertIs(p1_db.mugshot == p2.mugshot, False)
        self.assertIs(p1_db.mugshot != p2.mugshot, True)

        # Instance from db should match the local instance.
        self.assertIs(p1_db.mugshot == p1.mugshot, True)
        self.assertEqual(hash(p1_db.mugshot), hash(p1.mugshot))
        self.assertIs(p1_db.mugshot != p1.mugshot, False)

    def test_instantiate_missing(self):
        """
        If the underlying file is unavailable, still instantiate the object
        without error.
        """
        p = self.PersonModel(name="Joan")
        p.mugshot.save("shot", self.file1)
        p = self.PersonModel.objects.get(name="Joan")
        path = p.mugshot.path
        shutil.move(path, path + '.moved')
        self.PersonModel.objects.get(name="Joan")

    def test_delete_when_missing(self):
        """
        Bug #8175: correctly delete an object where the file no longer
        exists on the file system.
        """
        p = self.PersonModel(name="Fred")
        p.mugshot.save("shot", self.file1)
        os.remove(p.mugshot.path)
        p.delete()

    def test_size_method(self):
        """
        Bug #8534: FileField.size should not leave the file open.
        """
        p = self.PersonModel(name="Joan")
        p.mugshot.save("shot", self.file1)

        # Get a "clean" model instance
        p = self.PersonModel.objects.get(name="Joan")
        # It won't have an opened file.
        self.assertIs(p.mugshot.closed, True)

        # After asking for the size, the file should still be closed.
        p.mugshot.size
        self.assertIs(p.mugshot.closed, True)

    def test_pickle(self):
        """
        ImageField can be pickled, unpickled, and that the image of the
        unpickled version is the same as the original.
        """
        import pickle

        p = Person(name="Joe")
        p.mugshot.save("mug", self.file1)
        dump = pickle.dumps(p)

        loaded_p = pickle.loads(dump)
        self.assertEqual(p.mugshot, loaded_p.mugshot)
        self.assertEqual(p.mugshot.url, loaded_p.mugshot.url)
        self.assertEqual(p.mugshot.storage, loaded_p.mugshot.storage)
        self.assertEqual(p.mugshot.instance, loaded_p.mugshot.instance)
        self.assertEqual(p.mugshot.field, loaded_p.mugshot.field)

        mugshot_dump = pickle.dumps(p.mugshot)
        loaded_mugshot = pickle.loads(mugshot_dump)
        self.assertEqual(p.mugshot, loaded_mugshot)
        self.assertEqual(p.mugshot.url, loaded_mugshot.url)
        self.assertEqual(p.mugshot.storage, loaded_mugshot.storage)
        self.assertEqual(p.mugshot.instance, loaded_mugshot.instance)
        self.assertEqual(p.mugshot.field, loaded_mugshot.field)

    def test_defer(self):
        self.PersonModel.objects.create(name='Joe', mugshot=self.file1)
        with self.assertNumQueries(1):
            qs = list(self.PersonModel.objects.defer('mugshot'))
        with self.assertNumQueries(0):
            self.assertEqual(qs[0].name, 'Joe')


@skipIf(Image is None, "Pillow is required to test ImageField")
class ImageFieldTwoDimensionsTests(ImageFieldTestMixin, TestCase):
    """
    Tests behavior of an ImageField and its dimensions fields.
    """

    def test_constructor(self):
        """
        Tests assigning an image field through the model's constructor.
        """
        p = self.PersonModel(name='Joe', mugshot=self.file1)
        self.check_dimensions(p, 4, 8)
        p.save()
        self.check_dimensions(p, 4, 8)

    def test_image_after_constructor(self):
        """
        Tests behavior when image is not passed in constructor.
        """
        p = self.PersonModel(name='Joe')
        # TestImageField value will default to being an instance of its
        # attr_class, a TestImageFieldFile, with name == None, which will
        # cause it to evaluate as False.
        self.assertIsInstance(p.mugshot, TestImageFieldFile)
        self.assertFalse(p.mugshot)

        # Test setting a freshly created model instance.
        p = self.PersonModel(name='Joe')
        p.mugshot = self.file1
        self.check_dimensions(p, 4, 8)

    def test_create(self):
        """
        Tests assigning an image in Manager.create().
        """
        p = self.PersonModel.objects.create(name='Joe', mugshot=self.file1)
        self.check_dimensions(p, 4, 8)

    def test_default_value(self):
        """
        The default value for an ImageField is an instance of the field's
        attr_class (TestImageFieldFile in this case) with no name (name set
        to None).
        """
        p = self.PersonModel()
        self.assertIsInstance(p.mugshot, TestImageFieldFile)
        self.assertFalse(p.mugshot)

    def test_assignment_to_None(self):
        """
        Assigning ImageField to None clears dimensions.
        """
        p = self.PersonModel(name='Joe', mugshot=self.file1)
        self.check_dimensions(p, 4, 8)
        # If image assigned to None, dimension fields should be cleared.
        p.mugshot = None
        self.check_dimensions(p, None, None)
        p.mugshot = self.file2
        self.check_dimensions(p, 8, 4)

    def test_field_save_and_delete_methods(self):
        """
        Tests assignment using the field's save method and deletion using
        the field's delete method.
        """
        p = self.PersonModel(name='Joe')
        p.mugshot.save("mug", self.file1)
        self.check_dimensions(p, 4, 8)
        # A new file should update dimensions.
        p.mugshot.save("mug", self.file2)
        self.check_dimensions(p, 8, 4)
        # Field and dimensions should be cleared after a delete.
        p.mugshot.delete(save=False)
        self.assertIsNone(p.mugshot.name)
        self.check_dimensions(p, None, None)

    def test_dimensions(self):
        """
        Dimensions are updated correctly in various situations.
        """
        p = self.PersonModel(name='Joe')
        # Dimensions should get set if file is saved.
        p.mugshot.save("mug", self.file1)
        self.check_dimensions(p, 4, 8)
        # Test dimensions after fetching from database.
        p = self.PersonModel.objects.get(name='Joe')
        # Bug 11084: Dimensions should not get recalculated if file is
        # coming from the database. We test this by checking if the file
        # was opened.
        self.assertIs(p.mugshot.was_opened, False)
        self.check_dimensions(p, 4, 8)
        # After checking dimensions on the image field, the file will have
        # opened.
        self.assertIs(p.mugshot.was_opened, True)
        # Dimensions should now be cached, and if we reset was_opened and
        # check dimensions again, the file should not have opened.
        p.mugshot.was_opened = False
        self.check_dimensions(p, 4, 8)
        self.assertIs(p.mugshot.was_opened, False)
        # If we assign a new image to the instance, the dimensions should
        # update.
        p.mugshot = self.file2
        self.check_dimensions(p, 8, 4)
        # Dimensions were recalculated, and hence file should have opened.
        self.assertIs(p.mugshot.was_opened, True)


@skipIf(Image is None, "Pillow is required to test ImageField")
class ImageFieldNoDimensionsTests(ImageFieldTwoDimensionsTests):
    """
    Tests behavior of an ImageField with no dimension fields.
    """
    PersonModel = Person


@skipIf(Image is None, "Pillow is required to test ImageField")
class ImageFieldOneDimensionTests(ImageFieldTwoDimensionsTests):
    """
    Tests behavior of an ImageField with one dimension field.
    """
    PersonModel = PersonWithHeight


@skipIf(Image is None, "Pillow is required to test ImageField")
class ImageFieldDimensionsFirstTests(ImageFieldTwoDimensionsTests):
    """
    Tests behavior of an ImageField where the dimensions fields are defined
    before the ImageField.
""" PersonModel = PersonDimensionsFirst @skipIf(Image is None, "Pillow is required to test ImageField") class ImageFieldUsingFileTests(ImageFieldTwoDimensionsTests): """ Tests behavior of an ImageField when assigning it a File instance rather than an ImageFile instance. """ PersonModel = PersonDimensionsFirst File = File @skipIf(Image is None, "Pillow is required to test ImageField") class TwoImageFieldTests(ImageFieldTestMixin, TestCase): """ Tests a model with two ImageFields. """ PersonModel = PersonTwoImages def test_constructor(self): p = self.PersonModel(mugshot=self.file1, headshot=self.file2) self.check_dimensions(p, 4, 8, 'mugshot') self.check_dimensions(p, 8, 4, 'headshot') p.save() self.check_dimensions(p, 4, 8, 'mugshot') self.check_dimensions(p, 8, 4, 'headshot') def test_create(self): p = self.PersonModel.objects.create(mugshot=self.file1, headshot=self.file2) self.check_dimensions(p, 4, 8) self.check_dimensions(p, 8, 4, 'headshot') def test_assignment(self): p = self.PersonModel() self.check_dimensions(p, None, None, 'mugshot') self.check_dimensions(p, None, None, 'headshot') p.mugshot = self.file1 self.check_dimensions(p, 4, 8, 'mugshot') self.check_dimensions(p, None, None, 'headshot') p.headshot = self.file2 self.check_dimensions(p, 4, 8, 'mugshot') self.check_dimensions(p, 8, 4, 'headshot') # Clear the ImageFields one at a time. p.mugshot = None self.check_dimensions(p, None, None, 'mugshot') self.check_dimensions(p, 8, 4, 'headshot') p.headshot = None self.check_dimensions(p, None, None, 'mugshot') self.check_dimensions(p, None, None, 'headshot') def test_field_save_and_delete_methods(self): p = self.PersonModel(name='Joe') p.mugshot.save("mug", self.file1) self.check_dimensions(p, 4, 8, 'mugshot') self.check_dimensions(p, None, None, 'headshot') p.headshot.save("head", self.file2) self.check_dimensions(p, 4, 8, 'mugshot') self.check_dimensions(p, 8, 4, 'headshot') # We can use save=True when deleting the image field with null=True # dimension fields and the other field has an image. p.headshot.delete(save=True) self.check_dimensions(p, 4, 8, 'mugshot') self.check_dimensions(p, None, None, 'headshot') p.mugshot.delete(save=False) self.check_dimensions(p, None, None, 'mugshot') self.check_dimensions(p, None, None, 'headshot') def test_dimensions(self): """ Dimensions are updated correctly in various situations. """ p = self.PersonModel(name='Joe') # Dimensions should get set for the saved file. p.mugshot.save("mug", self.file1) p.headshot.save("head", self.file2) self.check_dimensions(p, 4, 8, 'mugshot') self.check_dimensions(p, 8, 4, 'headshot') # Test dimensions after fetching from database. p = self.PersonModel.objects.get(name='Joe') # Bug 11084: Dimensions should not get recalculated if file is # coming from the database. We test this by checking if the file # was opened. self.assertIs(p.mugshot.was_opened, False) self.assertIs(p.headshot.was_opened, False) self.check_dimensions(p, 4, 8, 'mugshot') self.check_dimensions(p, 8, 4, 'headshot') # After checking dimensions on the image fields, the files will # have been opened. self.assertIs(p.mugshot.was_opened, True) self.assertIs(p.headshot.was_opened, True) # Dimensions should now be cached, and if we reset was_opened and # check dimensions again, the file should not have opened. 
p.mugshot.was_opened = False p.headshot.was_opened = False self.check_dimensions(p, 4, 8, 'mugshot') self.check_dimensions(p, 8, 4, 'headshot') self.assertIs(p.mugshot.was_opened, False) self.assertIs(p.headshot.was_opened, False) # If we assign a new image to the instance, the dimensions should # update. p.mugshot = self.file2 p.headshot = self.file1 self.check_dimensions(p, 8, 4, 'mugshot') self.check_dimensions(p, 4, 8, 'headshot') # Dimensions were recalculated, and hence file should have opened. self.assertIs(p.mugshot.was_opened, True) self.assertIs(p.headshot.was_opened, True)
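# A rough sketch of the instrumented file class these tests rely on (the real
# TestImageFieldFile is defined in this app's models.py): it records whether
# open() was called so the dimension-caching tests can assert that reading
# cached dimensions does not touch the file. Details here are assumptions.
from django.db.models.fields.files import ImageFieldFile


class TestImageFieldFile(ImageFieldFile):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.was_opened = False

    def open(self):
        # Flag the access before delegating to the real open().
        self.was_opened = True
        return super().open()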
import datetime import pickle import unittest import uuid from copy import deepcopy from unittest import mock from django.core.exceptions import FieldError from django.db import DatabaseError, connection, models from django.db.models import CharField, Q, TimeField, UUIDField from django.db.models.aggregates import ( Avg, Count, Max, Min, StdDev, Sum, Variance, ) from django.db.models.expressions import ( Case, Col, Combinable, Exists, Expression, ExpressionList, ExpressionWrapper, F, Func, OrderBy, OuterRef, Random, RawSQL, Ref, Subquery, Value, When, ) from django.db.models.functions import ( Coalesce, Concat, Length, Lower, Substr, Upper, ) from django.db.models.sql import constants from django.db.models.sql.datastructures import Join from django.test import SimpleTestCase, TestCase, skipUnlessDBFeature from django.test.utils import Approximate, isolate_apps from .models import ( UUID, UUIDPK, Company, Employee, Experiment, Number, RemoteEmployee, Result, SimulationRun, Time, ) class BasicExpressionsTests(TestCase): @classmethod def setUpTestData(cls): cls.example_inc = Company.objects.create( name="Example Inc.", num_employees=2300, num_chairs=5, ceo=Employee.objects.create(firstname="Joe", lastname="Smith", salary=10) ) cls.foobar_ltd = Company.objects.create( name="Foobar Ltd.", num_employees=3, num_chairs=4, based_in_eu=True, ceo=Employee.objects.create(firstname="Frank", lastname="Meyer", salary=20) ) cls.max = Employee.objects.create(firstname='Max', lastname='Mustermann', salary=30) cls.gmbh = Company.objects.create(name='Test GmbH', num_employees=32, num_chairs=1, ceo=cls.max) def setUp(self): self.company_query = Company.objects.values( "name", "num_employees", "num_chairs" ).order_by( "name", "num_employees", "num_chairs" ) def test_annotate_values_aggregate(self): companies = Company.objects.annotate( salaries=F('ceo__salary'), ).values('num_employees', 'salaries').aggregate( result=Sum( F('salaries') + F('num_employees'), output_field=models.IntegerField() ), ) self.assertEqual(companies['result'], 2395) def test_annotate_values_filter(self): companies = Company.objects.annotate( foo=RawSQL('%s', ['value']), ).filter(foo='value').order_by('name') self.assertQuerysetEqual( companies, ['<Company: Example Inc.>', '<Company: Foobar Ltd.>', '<Company: Test GmbH>'], ) def test_annotate_values_count(self): companies = Company.objects.annotate(foo=RawSQL('%s', ['value'])) self.assertEqual(companies.count(), 3) @skipUnlessDBFeature('supports_boolean_expr_in_select_clause') def test_filtering_on_annotate_that_uses_q(self): self.assertEqual( Company.objects.annotate( num_employees_check=ExpressionWrapper(Q(num_employees__gt=3), output_field=models.BooleanField()) ).filter(num_employees_check=True).count(), 2, ) def test_filtering_on_q_that_is_boolean(self): self.assertEqual( Company.objects.filter( ExpressionWrapper(Q(num_employees__gt=3), output_field=models.BooleanField()) ).count(), 2, ) def test_filtering_on_rawsql_that_is_boolean(self): self.assertEqual( Company.objects.filter( RawSQL('num_employees > %s', (3,), output_field=models.BooleanField()), ).count(), 2, ) def test_filter_inter_attribute(self): # We can filter on attribute relationships on same model obj, e.g. # find companies where the number of employees is greater # than the number of chairs. 
self.assertSequenceEqual( self.company_query.filter(num_employees__gt=F("num_chairs")), [ { "num_chairs": 5, "name": "Example Inc.", "num_employees": 2300, }, { "num_chairs": 1, "name": "Test GmbH", "num_employees": 32 }, ], ) def test_update(self): # We can set one field to have the value of another field # Make sure we have enough chairs self.company_query.update(num_chairs=F("num_employees")) self.assertSequenceEqual( self.company_query, [ { "num_chairs": 2300, "name": "Example Inc.", "num_employees": 2300 }, { "num_chairs": 3, "name": "Foobar Ltd.", "num_employees": 3 }, { "num_chairs": 32, "name": "Test GmbH", "num_employees": 32 } ], ) def test_arithmetic(self): # We can perform arithmetic operations in expressions # Make sure we have 2 spare chairs self.company_query.update(num_chairs=F("num_employees") + 2) self.assertSequenceEqual( self.company_query, [ { 'num_chairs': 2302, 'name': 'Example Inc.', 'num_employees': 2300 }, { 'num_chairs': 5, 'name': 'Foobar Ltd.', 'num_employees': 3 }, { 'num_chairs': 34, 'name': 'Test GmbH', 'num_employees': 32 } ], ) def test_order_of_operations(self): # Law of order of operations is followed self.company_query.update(num_chairs=F('num_employees') + 2 * F('num_employees')) self.assertSequenceEqual( self.company_query, [ { 'num_chairs': 6900, 'name': 'Example Inc.', 'num_employees': 2300 }, { 'num_chairs': 9, 'name': 'Foobar Ltd.', 'num_employees': 3 }, { 'num_chairs': 96, 'name': 'Test GmbH', 'num_employees': 32 } ], ) def test_parenthesis_priority(self): # Law of order of operations can be overridden by parentheses self.company_query.update(num_chairs=(F('num_employees') + 2) * F('num_employees')) self.assertSequenceEqual( self.company_query, [ { 'num_chairs': 5294600, 'name': 'Example Inc.', 'num_employees': 2300 }, { 'num_chairs': 15, 'name': 'Foobar Ltd.', 'num_employees': 3 }, { 'num_chairs': 1088, 'name': 'Test GmbH', 'num_employees': 32 } ], ) def test_update_with_fk(self): # ForeignKey can become updated with the value of another ForeignKey. 
self.assertEqual(Company.objects.update(point_of_contact=F('ceo')), 3) self.assertQuerysetEqual( Company.objects.all(), ['Joe Smith', 'Frank Meyer', 'Max Mustermann'], lambda c: str(c.point_of_contact), ordered=False ) def test_update_with_none(self): Number.objects.create(integer=1, float=1.0) Number.objects.create(integer=2) Number.objects.filter(float__isnull=False).update(float=Value(None)) self.assertQuerysetEqual( Number.objects.all(), [None, None], lambda n: n.float, ordered=False ) def test_filter_with_join(self): # F Expressions can also span joins Company.objects.update(point_of_contact=F('ceo')) c = Company.objects.first() c.point_of_contact = Employee.objects.create(firstname="Guido", lastname="van Rossum") c.save() self.assertQuerysetEqual( Company.objects.filter(ceo__firstname=F('point_of_contact__firstname')), ['Foobar Ltd.', 'Test GmbH'], lambda c: c.name, ordered=False ) Company.objects.exclude( ceo__firstname=F("point_of_contact__firstname") ).update(name="foo") self.assertEqual( Company.objects.exclude( ceo__firstname=F('point_of_contact__firstname') ).get().name, "foo", ) msg = "Joined field references are not permitted in this query" with self.assertRaisesMessage(FieldError, msg): Company.objects.exclude( ceo__firstname=F('point_of_contact__firstname') ).update(name=F('point_of_contact__lastname')) def test_object_update(self): # F expressions can be used to update attributes on single objects self.gmbh.num_employees = F('num_employees') + 4 self.gmbh.save() self.gmbh.refresh_from_db() self.assertEqual(self.gmbh.num_employees, 36) def test_new_object_save(self): # We should be able to use Funcs when inserting new data test_co = Company(name=Lower(Value('UPPER')), num_employees=32, num_chairs=1, ceo=self.max) test_co.save() test_co.refresh_from_db() self.assertEqual(test_co.name, "upper") def test_new_object_create(self): test_co = Company.objects.create(name=Lower(Value('UPPER')), num_employees=32, num_chairs=1, ceo=self.max) test_co.refresh_from_db() self.assertEqual(test_co.name, "upper") def test_object_create_with_aggregate(self): # Aggregates are not allowed when inserting new data msg = 'Aggregate functions are not allowed in this query (num_employees=Max(Value(1))).' with self.assertRaisesMessage(FieldError, msg): Company.objects.create( name='Company', num_employees=Max(Value(1)), num_chairs=1, ceo=Employee.objects.create(firstname="Just", lastname="Doit", salary=30), ) def test_object_update_fk(self): # F expressions cannot be used to update attributes which are foreign # keys, or attributes which involve joins. test_gmbh = Company.objects.get(pk=self.gmbh.pk) msg = 'F(ceo)": "Company.point_of_contact" must be a "Employee" instance.' 
with self.assertRaisesMessage(ValueError, msg): test_gmbh.point_of_contact = F('ceo') test_gmbh.point_of_contact = self.gmbh.ceo test_gmbh.save() test_gmbh.name = F('ceo__lastname') msg = 'Joined field references are not permitted in this query' with self.assertRaisesMessage(FieldError, msg): test_gmbh.save() def test_update_inherited_field_value(self): msg = 'Joined field references are not permitted in this query' with self.assertRaisesMessage(FieldError, msg): RemoteEmployee.objects.update(adjusted_salary=F('salary') * 5) def test_object_update_unsaved_objects(self): # F expressions cannot be used to update attributes on objects which do # not yet exist in the database acme = Company(name='The Acme Widget Co.', num_employees=12, num_chairs=5, ceo=self.max) acme.num_employees = F("num_employees") + 16 msg = ( 'Failed to insert expression "Col(expressions_company, ' 'expressions.Company.num_employees) + Value(16)" on ' 'expressions.Company.num_employees. F() expressions can only be ' 'used to update, not to insert.' ) with self.assertRaisesMessage(ValueError, msg): acme.save() acme.num_employees = 12 acme.name = Lower(F('name')) msg = ( 'Failed to insert expression "Lower(Col(expressions_company, ' 'expressions.Company.name))" on expressions.Company.name. F() ' 'expressions can only be used to update, not to insert.' ) with self.assertRaisesMessage(ValueError, msg): acme.save() def test_ticket_11722_iexact_lookup(self): Employee.objects.create(firstname="John", lastname="Doe") Employee.objects.create(firstname="Test", lastname="test") queryset = Employee.objects.filter(firstname__iexact=F('lastname')) self.assertQuerysetEqual(queryset, ["<Employee: Test test>"]) def test_ticket_16731_startswith_lookup(self): Employee.objects.create(firstname="John", lastname="Doe") e2 = Employee.objects.create(firstname="Jack", lastname="Jackson") e3 = Employee.objects.create(firstname="Jack", lastname="jackson") self.assertSequenceEqual( Employee.objects.filter(lastname__startswith=F('firstname')), [e2, e3] if connection.features.has_case_insensitive_like else [e2] ) qs = Employee.objects.filter(lastname__istartswith=F('firstname')).order_by('pk') self.assertSequenceEqual(qs, [e2, e3]) def test_ticket_18375_join_reuse(self): # Reverse multijoin F() references and the lookup target the same join. # Pre #18375 the F() join was generated first and the lookup couldn't # reuse that join. qs = Employee.objects.filter(company_ceo_set__num_chairs=F('company_ceo_set__num_employees')) self.assertEqual(str(qs.query).count('JOIN'), 1) def test_ticket_18375_kwarg_ordering(self): # The next query was dict-randomization dependent - if the "gte=1" # was seen first, then the F() will reuse the join generated by the # gte lookup, if F() was seen first, then it generated a join the # other lookups could not reuse. qs = Employee.objects.filter( company_ceo_set__num_chairs=F('company_ceo_set__num_employees'), company_ceo_set__num_chairs__gte=1, ) self.assertEqual(str(qs.query).count('JOIN'), 1) def test_ticket_18375_kwarg_ordering_2(self): # Another similar case for F() than above. Now we have the same join # in two filter kwargs, one in the lhs lookup, one in F. Here pre # #18375 the amount of joins generated was random if dict # randomization was enabled, that is the generated query dependent # on which clause was seen first. 
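        # Counting 'JOIN' in the compiled SQL is the observable proxy for
        # join reuse: a single JOIN means the lookup and the F() reference
        # were resolved against the same alias regardless of kwarg order.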
qs = Employee.objects.filter( company_ceo_set__num_employees=F('pk'), pk=F('company_ceo_set__num_employees') ) self.assertEqual(str(qs.query).count('JOIN'), 1) def test_ticket_18375_chained_filters(self): # F() expressions do not reuse joins from previous filter. qs = Employee.objects.filter( company_ceo_set__num_employees=F('pk') ).filter( company_ceo_set__num_employees=F('company_ceo_set__num_employees') ) self.assertEqual(str(qs.query).count('JOIN'), 2) def test_order_by_exists(self): mary = Employee.objects.create(firstname='Mary', lastname='Mustermann', salary=20) mustermanns_by_seniority = Employee.objects.filter(lastname='Mustermann').order_by( # Order by whether the employee is the CEO of a company Exists(Company.objects.filter(ceo=OuterRef('pk'))).desc() ) self.assertSequenceEqual(mustermanns_by_seniority, [self.max, mary]) def test_order_by_multiline_sql(self): raw_order_by = ( RawSQL(''' CASE WHEN num_employees > 1000 THEN num_chairs ELSE 0 END ''', []).desc(), RawSQL(''' CASE WHEN num_chairs > 1 THEN 1 ELSE 0 END ''', []).asc() ) for qs in ( Company.objects.all(), Company.objects.distinct(), ): with self.subTest(qs=qs): self.assertSequenceEqual( qs.order_by(*raw_order_by), [self.example_inc, self.gmbh, self.foobar_ltd], ) def test_outerref(self): inner = Company.objects.filter(point_of_contact=OuterRef('pk')) msg = ( 'This queryset contains a reference to an outer query and may only ' 'be used in a subquery.' ) with self.assertRaisesMessage(ValueError, msg): inner.exists() outer = Employee.objects.annotate(is_point_of_contact=Exists(inner)) self.assertIs(outer.exists(), True) def test_exist_single_field_output_field(self): queryset = Company.objects.values('pk') self.assertIsInstance(Exists(queryset).output_field, models.BooleanField) def test_subquery(self): Company.objects.filter(name='Example Inc.').update( point_of_contact=Employee.objects.get(firstname='Joe', lastname='Smith'), ceo=self.max, ) Employee.objects.create(firstname='Bob', lastname='Brown', salary=40) qs = Employee.objects.annotate( is_point_of_contact=Exists(Company.objects.filter(point_of_contact=OuterRef('pk'))), is_not_point_of_contact=~Exists(Company.objects.filter(point_of_contact=OuterRef('pk'))), is_ceo_of_small_company=Exists(Company.objects.filter(num_employees__lt=200, ceo=OuterRef('pk'))), is_ceo_small_2=~~Exists(Company.objects.filter(num_employees__lt=200, ceo=OuterRef('pk'))), largest_company=Subquery(Company.objects.order_by('-num_employees').filter( models.Q(ceo=OuterRef('pk')) | models.Q(point_of_contact=OuterRef('pk')) ).values('name')[:1], output_field=models.CharField()) ).values( 'firstname', 'is_point_of_contact', 'is_not_point_of_contact', 'is_ceo_of_small_company', 'is_ceo_small_2', 'largest_company', ).order_by('firstname') results = list(qs) # Could use Coalesce(subq, Value('')) instead except for the bug in # cx_Oracle mentioned in #23843. 
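        # On backends where interprets_empty_strings_as_nulls is True (e.g.
        # Oracle), the NULL subquery result can surface as '', so Bob's value
        # is normalized below before comparing against the expected dicts.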
bob = results[0] if bob['largest_company'] == '' and connection.features.interprets_empty_strings_as_nulls: bob['largest_company'] = None self.assertEqual(results, [ { 'firstname': 'Bob', 'is_point_of_contact': False, 'is_not_point_of_contact': True, 'is_ceo_of_small_company': False, 'is_ceo_small_2': False, 'largest_company': None, }, { 'firstname': 'Frank', 'is_point_of_contact': False, 'is_not_point_of_contact': True, 'is_ceo_of_small_company': True, 'is_ceo_small_2': True, 'largest_company': 'Foobar Ltd.', }, { 'firstname': 'Joe', 'is_point_of_contact': True, 'is_not_point_of_contact': False, 'is_ceo_of_small_company': False, 'is_ceo_small_2': False, 'largest_company': 'Example Inc.', }, { 'firstname': 'Max', 'is_point_of_contact': False, 'is_not_point_of_contact': True, 'is_ceo_of_small_company': True, 'is_ceo_small_2': True, 'largest_company': 'Example Inc.' } ]) # A less elegant way to write the same query: this uses a LEFT OUTER # JOIN and an IS NULL, inside a WHERE NOT IN which is probably less # efficient than EXISTS. self.assertCountEqual( qs.filter(is_point_of_contact=True).values('pk'), Employee.objects.exclude(company_point_of_contact_set=None).values('pk') ) def test_in_subquery(self): # This is a contrived test (and you really wouldn't write this query), # but it is a succinct way to test the __in=Subquery() construct. small_companies = Company.objects.filter(num_employees__lt=200).values('pk') subquery_test = Company.objects.filter(pk__in=Subquery(small_companies)) self.assertCountEqual(subquery_test, [self.foobar_ltd, self.gmbh]) subquery_test2 = Company.objects.filter(pk=Subquery(small_companies.filter(num_employees=3))) self.assertCountEqual(subquery_test2, [self.foobar_ltd]) def test_uuid_pk_subquery(self): u = UUIDPK.objects.create() UUID.objects.create(uuid_fk=u) qs = UUIDPK.objects.filter(id__in=Subquery(UUID.objects.values('uuid_fk__id'))) self.assertCountEqual(qs, [u]) def test_nested_subquery(self): inner = Company.objects.filter(point_of_contact=OuterRef('pk')) outer = Employee.objects.annotate(is_point_of_contact=Exists(inner)) contrived = Employee.objects.annotate( is_point_of_contact=Subquery( outer.filter(pk=OuterRef('pk')).values('is_point_of_contact'), output_field=models.BooleanField(), ), ) self.assertCountEqual(contrived.values_list(), outer.values_list()) def test_nested_subquery_join_outer_ref(self): inner = Employee.objects.filter(pk=OuterRef('ceo__pk')).values('pk') qs = Employee.objects.annotate( ceo_company=Subquery( Company.objects.filter( ceo__in=inner, ceo__pk=OuterRef('pk'), ).values('pk'), ), ) self.assertSequenceEqual( qs.values_list('ceo_company', flat=True), [self.example_inc.pk, self.foobar_ltd.pk, self.gmbh.pk], ) def test_nested_subquery_outer_ref_2(self): first = Time.objects.create(time='09:00') second = Time.objects.create(time='17:00') third = Time.objects.create(time='21:00') SimulationRun.objects.bulk_create([ SimulationRun(start=first, end=second, midpoint='12:00'), SimulationRun(start=first, end=third, midpoint='15:00'), SimulationRun(start=second, end=first, midpoint='00:00'), ]) inner = Time.objects.filter(time=OuterRef(OuterRef('time')), pk=OuterRef('start')).values('time') middle = SimulationRun.objects.annotate(other=Subquery(inner)).values('other')[:1] outer = Time.objects.annotate(other=Subquery(middle, output_field=models.TimeField())) # This is a contrived example. It exercises the double OuterRef form. 
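        # OuterRef(OuterRef('time')) defers resolution by one query level:
        # the inner OuterRef is bound when `middle` is embedded in `outer`,
        # so `inner` ultimately compares against the outermost Time.time
        # column.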
self.assertCountEqual(outer, [first, second, third]) def test_nested_subquery_outer_ref_with_autofield(self): first = Time.objects.create(time='09:00') second = Time.objects.create(time='17:00') SimulationRun.objects.create(start=first, end=second, midpoint='12:00') inner = SimulationRun.objects.filter(start=OuterRef(OuterRef('pk'))).values('start') middle = Time.objects.annotate(other=Subquery(inner)).values('other')[:1] outer = Time.objects.annotate(other=Subquery(middle, output_field=models.IntegerField())) # This exercises the double OuterRef form with AutoField as pk. self.assertCountEqual(outer, [first, second]) def test_annotations_within_subquery(self): Company.objects.filter(num_employees__lt=50).update(ceo=Employee.objects.get(firstname='Frank')) inner = Company.objects.filter( ceo=OuterRef('pk') ).values('ceo').annotate(total_employees=models.Sum('num_employees')).values('total_employees') outer = Employee.objects.annotate(total_employees=Subquery(inner)).filter(salary__lte=Subquery(inner)) self.assertSequenceEqual( outer.order_by('-total_employees').values('salary', 'total_employees'), [{'salary': 10, 'total_employees': 2300}, {'salary': 20, 'total_employees': 35}], ) def test_subquery_references_joined_table_twice(self): inner = Company.objects.filter( num_chairs__gte=OuterRef('ceo__salary'), num_employees__gte=OuterRef('point_of_contact__salary'), ) # Another contrived example (there is no need to have a subquery here) outer = Company.objects.filter(pk__in=Subquery(inner.values('pk'))) self.assertFalse(outer.exists()) def test_subquery_filter_by_aggregate(self): Number.objects.create(integer=1000, float=1.2) Employee.objects.create(salary=1000) qs = Number.objects.annotate( min_valuable_count=Subquery( Employee.objects.filter( salary=OuterRef('integer'), ).annotate(cnt=Count('salary')).filter(cnt__gt=0).values('cnt')[:1] ), ) self.assertEqual(qs.get().float, 1.2) def test_aggregate_subquery_annotation(self): with self.assertNumQueries(1) as ctx: aggregate = Company.objects.annotate( ceo_salary=Subquery( Employee.objects.filter( id=OuterRef('ceo_id'), ).values('salary') ), ).aggregate( ceo_salary_gt_20=Count('pk', filter=Q(ceo_salary__gt=20)), ) self.assertEqual(aggregate, {'ceo_salary_gt_20': 1}) # Aggregation over a subquery annotation doesn't annotate the subquery # twice in the inner query. sql = ctx.captured_queries[0]['sql'] self.assertLessEqual(sql.count('SELECT'), 3) # GROUP BY isn't required to aggregate over a query that doesn't # contain nested aggregates. 
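        # A flat aggregate over the annotation needs no grouping because the
        # aggregate's filter references the subquery result, not a nested
        # aggregate.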
self.assertNotIn('GROUP BY', sql) def test_explicit_output_field(self): class FuncA(Func): output_field = models.CharField() class FuncB(Func): pass expr = FuncB(FuncA()) self.assertEqual(expr.output_field, FuncA.output_field) def test_outerref_mixed_case_table_name(self): inner = Result.objects.filter(result_time__gte=OuterRef('experiment__assigned')) outer = Result.objects.filter(pk__in=Subquery(inner.values('pk'))) self.assertFalse(outer.exists()) def test_outerref_with_operator(self): inner = Company.objects.filter(num_employees=OuterRef('ceo__salary') + 2) outer = Company.objects.filter(pk__in=Subquery(inner.values('pk'))) self.assertEqual(outer.get().name, 'Test GmbH') def test_annotation_with_outerref(self): gmbh_salary = Company.objects.annotate( max_ceo_salary_raise=Subquery( Company.objects.annotate( salary_raise=OuterRef('num_employees') + F('num_employees'), ).order_by('-salary_raise').values('salary_raise')[:1], output_field=models.IntegerField(), ), ).get(pk=self.gmbh.pk) self.assertEqual(gmbh_salary.max_ceo_salary_raise, 2332) def test_pickle_expression(self): expr = Value(1, output_field=models.IntegerField()) expr.convert_value # populate cached property self.assertEqual(pickle.loads(pickle.dumps(expr)), expr) def test_incorrect_field_in_F_expression(self): with self.assertRaisesMessage(FieldError, "Cannot resolve keyword 'nope' into field."): list(Employee.objects.filter(firstname=F('nope'))) def test_incorrect_joined_field_in_F_expression(self): with self.assertRaisesMessage(FieldError, "Cannot resolve keyword 'nope' into field."): list(Company.objects.filter(ceo__pk=F('point_of_contact__nope'))) def test_exists_in_filter(self): inner = Company.objects.filter(ceo=OuterRef('pk')).values('pk') qs1 = Employee.objects.filter(Exists(inner)) qs2 = Employee.objects.annotate(found=Exists(inner)).filter(found=True) self.assertCountEqual(qs1, qs2) self.assertFalse(Employee.objects.exclude(Exists(inner)).exists()) self.assertCountEqual(qs2, Employee.objects.exclude(~Exists(inner))) def test_subquery_in_filter(self): inner = Company.objects.filter(ceo=OuterRef('pk')).values('based_in_eu') self.assertSequenceEqual( Employee.objects.filter(Subquery(inner)), [self.foobar_ltd.ceo], ) def test_case_in_filter_if_boolean_output_field(self): is_ceo = Company.objects.filter(ceo=OuterRef('pk')) is_poc = Company.objects.filter(point_of_contact=OuterRef('pk')) qs = Employee.objects.filter( Case( When(Exists(is_ceo), then=True), When(Exists(is_poc), then=True), default=False, output_field=models.BooleanField(), ), ) self.assertSequenceEqual(qs, [self.example_inc.ceo, self.foobar_ltd.ceo, self.max]) def test_boolean_expression_combined(self): is_ceo = Company.objects.filter(ceo=OuterRef('pk')) is_poc = Company.objects.filter(point_of_contact=OuterRef('pk')) self.gmbh.point_of_contact = self.max self.gmbh.save() self.assertSequenceEqual( Employee.objects.filter(Exists(is_ceo) | Exists(is_poc)), [self.example_inc.ceo, self.foobar_ltd.ceo, self.max], ) self.assertSequenceEqual( Employee.objects.filter(Exists(is_ceo) & Exists(is_poc)), [self.max], ) self.assertSequenceEqual( Employee.objects.filter(Exists(is_ceo) & Q(salary__gte=30)), [self.max], ) self.assertSequenceEqual( Employee.objects.filter(Exists(is_poc) | Q(salary__lt=15)), [self.example_inc.ceo, self.max], ) class IterableLookupInnerExpressionsTests(TestCase): @classmethod def setUpTestData(cls): ceo = Employee.objects.create(firstname='Just', lastname='Doit', salary=30) # MySQL requires that the values calculated for expressions don't pass 
# outside of the field's range, so it's inconvenient to use the values # in the more general tests. Company.objects.create(name='5020 Ltd', num_employees=50, num_chairs=20, ceo=ceo) Company.objects.create(name='5040 Ltd', num_employees=50, num_chairs=40, ceo=ceo) Company.objects.create(name='5050 Ltd', num_employees=50, num_chairs=50, ceo=ceo) Company.objects.create(name='5060 Ltd', num_employees=50, num_chairs=60, ceo=ceo) Company.objects.create(name='99300 Ltd', num_employees=99, num_chairs=300, ceo=ceo) def test_in_lookup_allows_F_expressions_and_expressions_for_integers(self): # __in lookups can use F() expressions for integers. queryset = Company.objects.filter(num_employees__in=([F('num_chairs') - 10])) self.assertQuerysetEqual(queryset, ['<Company: 5060 Ltd>'], ordered=False) self.assertQuerysetEqual( Company.objects.filter(num_employees__in=([F('num_chairs') - 10, F('num_chairs') + 10])), ['<Company: 5040 Ltd>', '<Company: 5060 Ltd>'], ordered=False ) self.assertQuerysetEqual( Company.objects.filter( num_employees__in=([F('num_chairs') - 10, F('num_chairs'), F('num_chairs') + 10]) ), ['<Company: 5040 Ltd>', '<Company: 5050 Ltd>', '<Company: 5060 Ltd>'], ordered=False ) def test_expressions_in_lookups_join_choice(self): midpoint = datetime.time(13, 0) t1 = Time.objects.create(time=datetime.time(12, 0)) t2 = Time.objects.create(time=datetime.time(14, 0)) SimulationRun.objects.create(start=t1, end=t2, midpoint=midpoint) SimulationRun.objects.create(start=t1, end=None, midpoint=midpoint) SimulationRun.objects.create(start=None, end=t2, midpoint=midpoint) SimulationRun.objects.create(start=None, end=None, midpoint=midpoint) queryset = SimulationRun.objects.filter(midpoint__range=[F('start__time'), F('end__time')]) self.assertQuerysetEqual( queryset, ['<SimulationRun: 13:00:00 (12:00:00 to 14:00:00)>'], ordered=False ) for alias in queryset.query.alias_map.values(): if isinstance(alias, Join): self.assertEqual(alias.join_type, constants.INNER) queryset = SimulationRun.objects.exclude(midpoint__range=[F('start__time'), F('end__time')]) self.assertQuerysetEqual(queryset, [], ordered=False) for alias in queryset.query.alias_map.values(): if isinstance(alias, Join): self.assertEqual(alias.join_type, constants.LOUTER) def test_range_lookup_allows_F_expressions_and_expressions_for_integers(self): # Range lookups can use F() expressions for integers. 
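        # QuerySets are lazy: the bare filter() call below only builds the
        # query, verifying that __exact accepts an F() expression without
        # executing any SQL.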
Company.objects.filter(num_employees__exact=F("num_chairs"))
        self.assertQuerysetEqual(
            Company.objects.filter(num_employees__range=(F('num_chairs'), 100)),
            ['<Company: 5020 Ltd>', '<Company: 5040 Ltd>', '<Company: 5050 Ltd>'],
            ordered=False
        )
        self.assertQuerysetEqual(
            Company.objects.filter(num_employees__range=(F('num_chairs') - 10, F('num_chairs') + 10)),
            ['<Company: 5040 Ltd>', '<Company: 5050 Ltd>', '<Company: 5060 Ltd>'],
            ordered=False
        )
        self.assertQuerysetEqual(
            Company.objects.filter(num_employees__range=(F('num_chairs') - 10, 100)),
            ['<Company: 5020 Ltd>', '<Company: 5040 Ltd>', '<Company: 5050 Ltd>', '<Company: 5060 Ltd>'],
            ordered=False
        )
        self.assertQuerysetEqual(
            Company.objects.filter(num_employees__range=(1, 100)),
            [
                '<Company: 5020 Ltd>', '<Company: 5040 Ltd>', '<Company: 5050 Ltd>',
                '<Company: 5060 Ltd>', '<Company: 99300 Ltd>',
            ],
            ordered=False
        )

    @unittest.skipUnless(connection.vendor == 'sqlite',
                         "This defensive test only works on databases that don't validate parameter types")
    def test_complex_expressions_do_not_introduce_sql_injection_via_untrusted_string_inclusion(self):
        """
        This tests that SQL injection isn't possible using compilation of
        expressions in iterable filters, as their compilation happens before
        the main query compilation. It's limited to SQLite, as PostgreSQL,
        Oracle and other vendors have defense in depth against this by type
        checking. Testing against SQLite (the most permissive of the built-in
        databases) demonstrates that the problem doesn't exist while keeping
        the test simple.
        """
        queryset = Company.objects.filter(name__in=[F('num_chairs') + '1)) OR ((1==1'])
        self.assertQuerysetEqual(queryset, [], ordered=False)

    def test_in_lookup_allows_F_expressions_and_expressions_for_datetimes(self):
        start = datetime.datetime(2016, 2, 3, 15, 0, 0)
        end = datetime.datetime(2016, 2, 5, 15, 0, 0)
        experiment_1 = Experiment.objects.create(
            name='Integrity testing',
            assigned=start.date(),
            start=start,
            end=end,
            completed=end.date(),
            estimated_time=end - start,
        )
        experiment_2 = Experiment.objects.create(
            name='Taste testing',
            assigned=start.date(),
            start=start,
            end=end,
            completed=end.date(),
            estimated_time=end - start,
        )
        Result.objects.create(
            experiment=experiment_1,
            result_time=datetime.datetime(2016, 2, 4, 15, 0, 0),
        )
        Result.objects.create(
            experiment=experiment_1,
            result_time=datetime.datetime(2016, 3, 10, 2, 0, 0),
        )
        Result.objects.create(
            experiment=experiment_2,
            result_time=datetime.datetime(2016, 1, 8, 5, 0, 0),
        )

        within_experiment_time = [F('experiment__start'), F('experiment__end')]
        queryset = Result.objects.filter(result_time__range=within_experiment_time)
        self.assertQuerysetEqual(queryset, ["<Result: Result at 2016-02-04 15:00:00>"])


class FTests(SimpleTestCase):

    def test_deepcopy(self):
        f = F("foo")
        g = deepcopy(f)
        self.assertEqual(f.name, g.name)

    def test_deconstruct(self):
        f = F('name')
        path, args, kwargs = f.deconstruct()
        self.assertEqual(path, 'django.db.models.expressions.F')
        self.assertEqual(args, (f.name,))
        self.assertEqual(kwargs, {})

    def test_equal(self):
        f = F('name')
        same_f = F('name')
        other_f = F('username')
        self.assertEqual(f, same_f)
        self.assertNotEqual(f, other_f)

    def test_hash(self):
        d = {F('name'): 'Bob'}
        self.assertIn(F('name'), d)
        self.assertEqual(d[F('name')], 'Bob')

    def test_not_equal_Value(self):
        f = F('name')
        value
= Value('name') self.assertNotEqual(f, value) self.assertNotEqual(value, f) class ExpressionsTests(TestCase): def test_F_reuse(self): f = F('id') n = Number.objects.create(integer=-1) c = Company.objects.create( name="Example Inc.", num_employees=2300, num_chairs=5, ceo=Employee.objects.create(firstname="Joe", lastname="Smith") ) c_qs = Company.objects.filter(id=f) self.assertEqual(c_qs.get(), c) # Reuse the same F-object for another queryset n_qs = Number.objects.filter(id=f) self.assertEqual(n_qs.get(), n) # The original query still works correctly self.assertEqual(c_qs.get(), c) def test_patterns_escape(self): r""" Special characters (e.g. %, _ and \) stored in database are properly escaped when using a pattern lookup with an expression refs #16731 """ Employee.objects.bulk_create([ Employee(firstname="%Joh\\nny", lastname="%Joh\\n"), Employee(firstname="Johnny", lastname="%John"), Employee(firstname="Jean-Claude", lastname="Claud_"), Employee(firstname="Jean-Claude", lastname="Claude"), Employee(firstname="Jean-Claude", lastname="Claude%"), Employee(firstname="Johnny", lastname="Joh\\n"), Employee(firstname="Johnny", lastname="John"), Employee(firstname="Johnny", lastname="_ohn"), ]) self.assertQuerysetEqual( Employee.objects.filter(firstname__contains=F('lastname')), ["<Employee: %Joh\\nny %Joh\\n>", "<Employee: Jean-Claude Claude>", "<Employee: Johnny John>"], ordered=False, ) self.assertQuerysetEqual( Employee.objects.filter(firstname__startswith=F('lastname')), ["<Employee: %Joh\\nny %Joh\\n>", "<Employee: Johnny John>"], ordered=False, ) self.assertQuerysetEqual( Employee.objects.filter(firstname__endswith=F('lastname')), ["<Employee: Jean-Claude Claude>"], ordered=False, ) def test_insensitive_patterns_escape(self): r""" Special characters (e.g. 
%, _ and \) stored in the database are properly escaped when using a
        case-insensitive pattern lookup with an expression -- refs #16731
        """
        Employee.objects.bulk_create([
            Employee(firstname="%Joh\\nny", lastname="%joh\\n"),
            Employee(firstname="Johnny", lastname="%john"),
            Employee(firstname="Jean-Claude", lastname="claud_"),
            Employee(firstname="Jean-Claude", lastname="claude"),
            Employee(firstname="Jean-Claude", lastname="claude%"),
            Employee(firstname="Johnny", lastname="joh\\n"),
            Employee(firstname="Johnny", lastname="john"),
            Employee(firstname="Johnny", lastname="_ohn"),
        ])
        self.assertQuerysetEqual(
            Employee.objects.filter(firstname__icontains=F('lastname')),
            ["<Employee: %Joh\\nny %joh\\n>", "<Employee: Jean-Claude claude>", "<Employee: Johnny john>"],
            ordered=False,
        )
        self.assertQuerysetEqual(
            Employee.objects.filter(firstname__istartswith=F('lastname')),
            ["<Employee: %Joh\\nny %joh\\n>", "<Employee: Johnny john>"],
            ordered=False,
        )
        self.assertQuerysetEqual(
            Employee.objects.filter(firstname__iendswith=F('lastname')),
            ["<Employee: Jean-Claude claude>"],
            ordered=False,
        )


@isolate_apps('expressions')
class SimpleExpressionTests(SimpleTestCase):

    def test_equal(self):
        self.assertEqual(Expression(), Expression())
        self.assertEqual(
            Expression(models.IntegerField()),
            Expression(output_field=models.IntegerField())
        )
        self.assertEqual(Expression(models.IntegerField()), mock.ANY)
        self.assertNotEqual(
            Expression(models.IntegerField()),
            Expression(models.CharField())
        )

        class TestModel(models.Model):
            field = models.IntegerField()
            other_field = models.IntegerField()

        self.assertNotEqual(
            Expression(TestModel._meta.get_field('field')),
            Expression(TestModel._meta.get_field('other_field')),
        )

    def test_hash(self):
        self.assertEqual(hash(Expression()), hash(Expression()))
        self.assertEqual(
            hash(Expression(models.IntegerField())),
            hash(Expression(output_field=models.IntegerField()))
        )
        self.assertNotEqual(
            hash(Expression(models.IntegerField())),
            hash(Expression(models.CharField())),
        )

        class TestModel(models.Model):
            field = models.IntegerField()
            other_field = models.IntegerField()

        self.assertNotEqual(
            hash(Expression(TestModel._meta.get_field('field'))),
            hash(Expression(TestModel._meta.get_field('other_field'))),
        )


class ExpressionsNumericTests(TestCase):
    @classmethod
    def setUpTestData(cls):
        Number(integer=-1).save()
        Number(integer=42).save()
        Number(integer=1337).save()
        Number.objects.update(float=F('integer'))

    def test_fill_with_value_from_same_object(self):
        """
        We can fill a value in all objects with another value of the same
        object.
        """
        self.assertQuerysetEqual(
            Number.objects.all(),
            ['<Number: -1, -1.000>', '<Number: 42, 42.000>', '<Number: 1337, 1337.000>'],
            ordered=False
        )

    def test_increment_value(self):
        """
        We can increment a value of all objects in a queryset.
        """
        self.assertEqual(Number.objects.filter(integer__gt=0).update(integer=F('integer') + 1), 2)
        self.assertQuerysetEqual(
            Number.objects.all(),
            ['<Number: -1, -1.000>', '<Number: 43, 42.000>', '<Number: 1338, 1337.000>'],
            ordered=False
        )

    def test_filter_not_equals_other_field(self):
        """
        We can filter for objects where a value is not equal to the value of
        another field.
        """
        self.assertEqual(Number.objects.filter(integer__gt=0).update(integer=F('integer') + 1), 2)
        self.assertQuerysetEqual(
            Number.objects.exclude(float=F('integer')),
            ['<Number: 43, 42.000>', '<Number: 1338, 1337.000>'],
            ordered=False
        )

    def test_complex_expressions(self):
        """
        Complex expressions of different connection types are possible.
""" n = Number.objects.create(integer=10, float=123.45) self.assertEqual(Number.objects.filter(pk=n.pk).update( float=F('integer') + F('float') * 2), 1) self.assertEqual(Number.objects.get(pk=n.pk).integer, 10) self.assertEqual(Number.objects.get(pk=n.pk).float, Approximate(256.900, places=3)) class ExpressionOperatorTests(TestCase): @classmethod def setUpTestData(cls): cls.n = Number.objects.create(integer=42, float=15.5) cls.n1 = Number.objects.create(integer=-42, float=-15.5) def test_lefthand_addition(self): # LH Addition of floats and integers Number.objects.filter(pk=self.n.pk).update( integer=F('integer') + 15, float=F('float') + 42.7 ) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 57) self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(58.200, places=3)) def test_lefthand_subtraction(self): # LH Subtraction of floats and integers Number.objects.filter(pk=self.n.pk).update(integer=F('integer') - 15, float=F('float') - 42.7) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 27) self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(-27.200, places=3)) def test_lefthand_multiplication(self): # Multiplication of floats and integers Number.objects.filter(pk=self.n.pk).update(integer=F('integer') * 15, float=F('float') * 42.7) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 630) self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(661.850, places=3)) def test_lefthand_division(self): # LH Division of floats and integers Number.objects.filter(pk=self.n.pk).update(integer=F('integer') / 2, float=F('float') / 42.7) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 21) self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(0.363, places=3)) def test_lefthand_modulo(self): # LH Modulo arithmetic on integers Number.objects.filter(pk=self.n.pk).update(integer=F('integer') % 20) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 2) self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(15.500, places=3)) def test_lefthand_bitwise_and(self): # LH Bitwise ands on integers Number.objects.filter(pk=self.n.pk).update(integer=F('integer').bitand(56)) Number.objects.filter(pk=self.n1.pk).update(integer=F('integer').bitand(-56)) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 40) self.assertEqual(Number.objects.get(pk=self.n1.pk).integer, -64) self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(15.500, places=3)) def test_lefthand_bitwise_left_shift_operator(self): Number.objects.update(integer=F('integer').bitleftshift(2)) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 168) self.assertEqual(Number.objects.get(pk=self.n1.pk).integer, -168) def test_lefthand_bitwise_right_shift_operator(self): Number.objects.update(integer=F('integer').bitrightshift(2)) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 10) self.assertEqual(Number.objects.get(pk=self.n1.pk).integer, -11) def test_lefthand_bitwise_or(self): # LH Bitwise or on integers Number.objects.update(integer=F('integer').bitor(48)) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 58) self.assertEqual(Number.objects.get(pk=self.n1.pk).integer, -10) self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(15.500, places=3)) def test_lefthand_power(self): # LH Power arithmetic operation on floats and integers Number.objects.filter(pk=self.n.pk).update(integer=F('integer') ** 2, float=F('float') ** 1.5) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 
1764) self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(61.02, places=2)) def test_right_hand_addition(self): # Right hand operators Number.objects.filter(pk=self.n.pk).update(integer=15 + F('integer'), float=42.7 + F('float')) # RH Addition of floats and integers self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 57) self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(58.200, places=3)) def test_right_hand_subtraction(self): Number.objects.filter(pk=self.n.pk).update(integer=15 - F('integer'), float=42.7 - F('float')) # RH Subtraction of floats and integers self.assertEqual(Number.objects.get(pk=self.n.pk).integer, -27) self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(27.200, places=3)) def test_right_hand_multiplication(self): # RH Multiplication of floats and integers Number.objects.filter(pk=self.n.pk).update(integer=15 * F('integer'), float=42.7 * F('float')) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 630) self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(661.850, places=3)) def test_right_hand_division(self): # RH Division of floats and integers Number.objects.filter(pk=self.n.pk).update(integer=640 / F('integer'), float=42.7 / F('float')) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 15) self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(2.755, places=3)) def test_right_hand_modulo(self): # RH Modulo arithmetic on integers Number.objects.filter(pk=self.n.pk).update(integer=69 % F('integer')) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 27) self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(15.500, places=3)) def test_righthand_power(self): # RH Power arithmetic operation on floats and integers Number.objects.filter(pk=self.n.pk).update(integer=2 ** F('integer'), float=1.5 ** F('float')) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 4398046511104) self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(536.308, places=3)) class FTimeDeltaTests(TestCase): @classmethod def setUpTestData(cls): cls.sday = sday = datetime.date(2010, 6, 25) cls.stime = stime = datetime.datetime(2010, 6, 25, 12, 15, 30, 747000) midnight = datetime.time(0) delta0 = datetime.timedelta(0) delta1 = datetime.timedelta(microseconds=253000) delta2 = datetime.timedelta(seconds=44) delta3 = datetime.timedelta(hours=21, minutes=8) delta4 = datetime.timedelta(days=10) delta5 = datetime.timedelta(days=90) # Test data is set so that deltas and delays will be # strictly increasing. cls.deltas = [] cls.delays = [] cls.days_long = [] # e0: started same day as assigned, zero duration end = stime + delta0 e0 = Experiment.objects.create( name='e0', assigned=sday, start=stime, end=end, completed=end.date(), estimated_time=delta0, ) cls.deltas.append(delta0) cls.delays.append(e0.start - datetime.datetime.combine(e0.assigned, midnight)) cls.days_long.append(e0.completed - e0.assigned) # e1: started one day after assigned, tiny duration, data # set so that end time has no fractional seconds, which # tests an edge case on sqlite. 
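        # (The start's 747000µs plus delta1's 253000µs add up to exactly one
        # second, so `end` carries no fractional-second component.)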
delay = datetime.timedelta(1) end = stime + delay + delta1 e1 = Experiment.objects.create( name='e1', assigned=sday, start=stime + delay, end=end, completed=end.date(), estimated_time=delta1, ) cls.deltas.append(delta1) cls.delays.append(e1.start - datetime.datetime.combine(e1.assigned, midnight)) cls.days_long.append(e1.completed - e1.assigned) # e2: started three days after assigned, small duration end = stime + delta2 e2 = Experiment.objects.create( name='e2', assigned=sday - datetime.timedelta(3), start=stime, end=end, completed=end.date(), estimated_time=datetime.timedelta(hours=1), ) cls.deltas.append(delta2) cls.delays.append(e2.start - datetime.datetime.combine(e2.assigned, midnight)) cls.days_long.append(e2.completed - e2.assigned) # e3: started four days after assigned, medium duration delay = datetime.timedelta(4) end = stime + delay + delta3 e3 = Experiment.objects.create( name='e3', assigned=sday, start=stime + delay, end=end, completed=end.date(), estimated_time=delta3, ) cls.deltas.append(delta3) cls.delays.append(e3.start - datetime.datetime.combine(e3.assigned, midnight)) cls.days_long.append(e3.completed - e3.assigned) # e4: started 10 days after assignment, long duration end = stime + delta4 e4 = Experiment.objects.create( name='e4', assigned=sday - datetime.timedelta(10), start=stime, end=end, completed=end.date(), estimated_time=delta4 - datetime.timedelta(1), ) cls.deltas.append(delta4) cls.delays.append(e4.start - datetime.datetime.combine(e4.assigned, midnight)) cls.days_long.append(e4.completed - e4.assigned) # e5: started a month after assignment, very long duration delay = datetime.timedelta(30) end = stime + delay + delta5 e5 = Experiment.objects.create( name='e5', assigned=sday, start=stime + delay, end=end, completed=end.date(), estimated_time=delta5, ) cls.deltas.append(delta5) cls.delays.append(e5.start - datetime.datetime.combine(e5.assigned, midnight)) cls.days_long.append(e5.completed - e5.assigned) cls.expnames = [e.name for e in Experiment.objects.all()] def test_multiple_query_compilation(self): # Ticket #21643 queryset = Experiment.objects.filter(end__lt=F('start') + datetime.timedelta(hours=1)) q1 = str(queryset.query) q2 = str(queryset.query) self.assertEqual(q1, q2) def test_query_clone(self): # Ticket #21643 - Crash when compiling query more than once qs = Experiment.objects.filter(end__lt=F('start') + datetime.timedelta(hours=1)) qs2 = qs.all() list(qs) list(qs2) # Intentionally no assert def test_delta_add(self): for i, delta in enumerate(self.deltas): test_set = [e.name for e in Experiment.objects.filter(end__lt=F('start') + delta)] self.assertEqual(test_set, self.expnames[:i]) test_set = [e.name for e in Experiment.objects.filter(end__lt=delta + F('start'))] self.assertEqual(test_set, self.expnames[:i]) test_set = [e.name for e in Experiment.objects.filter(end__lte=F('start') + delta)] self.assertEqual(test_set, self.expnames[:i + 1]) def test_delta_subtract(self): for i, delta in enumerate(self.deltas): test_set = [e.name for e in Experiment.objects.filter(start__gt=F('end') - delta)] self.assertEqual(test_set, self.expnames[:i]) test_set = [e.name for e in Experiment.objects.filter(start__gte=F('end') - delta)] self.assertEqual(test_set, self.expnames[:i + 1]) def test_exclude(self): for i, delta in enumerate(self.deltas): test_set = [e.name for e in Experiment.objects.exclude(end__lt=F('start') + delta)] self.assertEqual(test_set, self.expnames[i:]) test_set = [e.name for e in Experiment.objects.exclude(end__lte=F('start') + delta)] 
self.assertEqual(test_set, self.expnames[i + 1:]) def test_date_comparison(self): for i, days in enumerate(self.days_long): test_set = [e.name for e in Experiment.objects.filter(completed__lt=F('assigned') + days)] self.assertEqual(test_set, self.expnames[:i]) test_set = [e.name for e in Experiment.objects.filter(completed__lte=F('assigned') + days)] self.assertEqual(test_set, self.expnames[:i + 1]) @skipUnlessDBFeature("supports_mixed_date_datetime_comparisons") def test_mixed_comparisons1(self): for i, delay in enumerate(self.delays): test_set = [e.name for e in Experiment.objects.filter(assigned__gt=F('start') - delay)] self.assertEqual(test_set, self.expnames[:i]) test_set = [e.name for e in Experiment.objects.filter(assigned__gte=F('start') - delay)] self.assertEqual(test_set, self.expnames[:i + 1]) def test_mixed_comparisons2(self): for i, delay in enumerate(self.delays): delay = datetime.timedelta(delay.days) test_set = [e.name for e in Experiment.objects.filter(start__lt=F('assigned') + delay)] self.assertEqual(test_set, self.expnames[:i]) test_set = [ e.name for e in Experiment.objects.filter(start__lte=F('assigned') + delay + datetime.timedelta(1)) ] self.assertEqual(test_set, self.expnames[:i + 1]) def test_delta_update(self): for delta in self.deltas: exps = Experiment.objects.all() expected_durations = [e.duration() for e in exps] expected_starts = [e.start + delta for e in exps] expected_ends = [e.end + delta for e in exps] Experiment.objects.update(start=F('start') + delta, end=F('end') + delta) exps = Experiment.objects.all() new_starts = [e.start for e in exps] new_ends = [e.end for e in exps] new_durations = [e.duration() for e in exps] self.assertEqual(expected_starts, new_starts) self.assertEqual(expected_ends, new_ends) self.assertEqual(expected_durations, new_durations) def test_invalid_operator(self): with self.assertRaises(DatabaseError): list(Experiment.objects.filter(start=F('start') * datetime.timedelta(0))) def test_durationfield_add(self): zeros = [e.name for e in Experiment.objects.filter(start=F('start') + F('estimated_time'))] self.assertEqual(zeros, ['e0']) end_less = [e.name for e in Experiment.objects.filter(end__lt=F('start') + F('estimated_time'))] self.assertEqual(end_less, ['e2']) delta_math = [ e.name for e in Experiment.objects.filter(end__gte=F('start') + F('estimated_time') + datetime.timedelta(hours=1)) ] self.assertEqual(delta_math, ['e4']) queryset = Experiment.objects.annotate(shifted=ExpressionWrapper( F('start') + Value(None, output_field=models.DurationField()), output_field=models.DateTimeField(), )) self.assertIsNone(queryset.first().shifted) @skipUnlessDBFeature('supports_temporal_subtraction') def test_date_subtraction(self): queryset = Experiment.objects.annotate( completion_duration=ExpressionWrapper( F('completed') - F('assigned'), output_field=models.DurationField() ) ) at_least_5_days = {e.name for e in queryset.filter(completion_duration__gte=datetime.timedelta(days=5))} self.assertEqual(at_least_5_days, {'e3', 'e4', 'e5'}) at_least_120_days = {e.name for e in queryset.filter(completion_duration__gte=datetime.timedelta(days=120))} self.assertEqual(at_least_120_days, {'e5'}) less_than_5_days = {e.name for e in queryset.filter(completion_duration__lt=datetime.timedelta(days=5))} self.assertEqual(less_than_5_days, {'e0', 'e1', 'e2'}) queryset = Experiment.objects.annotate(difference=ExpressionWrapper( F('completed') - Value(None, output_field=models.DateField()), output_field=models.DurationField(), )) 
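        # NULL propagates through SQL date arithmetic: completed - NULL is
        # NULL, so the annotation round-trips as None.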
self.assertIsNone(queryset.first().difference) queryset = Experiment.objects.annotate(shifted=ExpressionWrapper( F('completed') - Value(None, output_field=models.DurationField()), output_field=models.DateField(), )) self.assertIsNone(queryset.first().shifted) @skipUnlessDBFeature('supports_temporal_subtraction') def test_date_subquery_subtraction(self): subquery = Experiment.objects.filter(pk=OuterRef('pk')).values('completed') queryset = Experiment.objects.annotate( difference=ExpressionWrapper( subquery - F('completed'), output_field=models.DurationField(), ), ).filter(difference=datetime.timedelta()) self.assertTrue(queryset.exists()) @skipUnlessDBFeature('supports_temporal_subtraction') def test_time_subtraction(self): Time.objects.create(time=datetime.time(12, 30, 15, 2345)) queryset = Time.objects.annotate( difference=ExpressionWrapper( F('time') - Value(datetime.time(11, 15, 0), output_field=models.TimeField()), output_field=models.DurationField(), ) ) self.assertEqual( queryset.get().difference, datetime.timedelta(hours=1, minutes=15, seconds=15, microseconds=2345) ) queryset = Time.objects.annotate(difference=ExpressionWrapper( F('time') - Value(None, output_field=models.TimeField()), output_field=models.DurationField(), )) self.assertIsNone(queryset.first().difference) queryset = Time.objects.annotate(shifted=ExpressionWrapper( F('time') - Value(None, output_field=models.DurationField()), output_field=models.TimeField(), )) self.assertIsNone(queryset.first().shifted) @skipUnlessDBFeature('supports_temporal_subtraction') def test_time_subquery_subtraction(self): Time.objects.create(time=datetime.time(12, 30, 15, 2345)) subquery = Time.objects.filter(pk=OuterRef('pk')).values('time') queryset = Time.objects.annotate( difference=ExpressionWrapper( subquery - F('time'), output_field=models.DurationField(), ), ).filter(difference=datetime.timedelta()) self.assertTrue(queryset.exists()) @skipUnlessDBFeature('supports_temporal_subtraction') def test_datetime_subtraction(self): under_estimate = [ e.name for e in Experiment.objects.filter(estimated_time__gt=F('end') - F('start')) ] self.assertEqual(under_estimate, ['e2']) over_estimate = [ e.name for e in Experiment.objects.filter(estimated_time__lt=F('end') - F('start')) ] self.assertEqual(over_estimate, ['e4']) queryset = Experiment.objects.annotate(difference=ExpressionWrapper( F('start') - Value(None, output_field=models.DateTimeField()), output_field=models.DurationField(), )) self.assertIsNone(queryset.first().difference) queryset = Experiment.objects.annotate(shifted=ExpressionWrapper( F('start') - Value(None, output_field=models.DurationField()), output_field=models.DateTimeField(), )) self.assertIsNone(queryset.first().shifted) @skipUnlessDBFeature('supports_temporal_subtraction') def test_datetime_subquery_subtraction(self): subquery = Experiment.objects.filter(pk=OuterRef('pk')).values('start') queryset = Experiment.objects.annotate( difference=ExpressionWrapper( subquery - F('start'), output_field=models.DurationField(), ), ).filter(difference=datetime.timedelta()) self.assertTrue(queryset.exists()) @skipUnlessDBFeature('supports_temporal_subtraction') def test_datetime_subtraction_microseconds(self): delta = datetime.timedelta(microseconds=8999999999999999) Experiment.objects.update(end=F('start') + delta) qs = Experiment.objects.annotate( delta=ExpressionWrapper(F('end') - F('start'), output_field=models.DurationField()) ) for e in qs: self.assertEqual(e.delta, delta) def test_duration_with_datetime(self): # Exclude e1 which 
has very high precision so we can test this on all # backends regardless of whether or not it supports # microsecond_precision. over_estimate = Experiment.objects.exclude(name='e1').filter( completed__gt=self.stime + F('estimated_time'), ).order_by('name') self.assertQuerysetEqual(over_estimate, ['e3', 'e4', 'e5'], lambda e: e.name) def test_duration_with_datetime_microseconds(self): delta = datetime.timedelta(microseconds=8999999999999999) qs = Experiment.objects.annotate(dt=ExpressionWrapper( F('start') + delta, output_field=models.DateTimeField(), )) for e in qs: self.assertEqual(e.dt, e.start + delta) def test_date_minus_duration(self): more_than_4_days = Experiment.objects.filter( assigned__lt=F('completed') - Value(datetime.timedelta(days=4), output_field=models.DurationField()) ) self.assertQuerysetEqual(more_than_4_days, ['e3', 'e4', 'e5'], lambda e: e.name) def test_negative_timedelta_update(self): # subtract 30 seconds, 30 minutes, 2 hours and 2 days experiments = Experiment.objects.filter(name='e0').annotate( start_sub_seconds=F('start') + datetime.timedelta(seconds=-30), ).annotate( start_sub_minutes=F('start_sub_seconds') + datetime.timedelta(minutes=-30), ).annotate( start_sub_hours=F('start_sub_minutes') + datetime.timedelta(hours=-2), ).annotate( new_start=F('start_sub_hours') + datetime.timedelta(days=-2), ) expected_start = datetime.datetime(2010, 6, 23, 9, 45, 0) # subtract 30 microseconds experiments = experiments.annotate(new_start=F('new_start') + datetime.timedelta(microseconds=-30)) expected_start += datetime.timedelta(microseconds=+746970) experiments.update(start=F('new_start')) e0 = Experiment.objects.get(name='e0') self.assertEqual(e0.start, expected_start) class ValueTests(TestCase): def test_update_TimeField_using_Value(self): Time.objects.create() Time.objects.update(time=Value(datetime.time(1), output_field=TimeField())) self.assertEqual(Time.objects.get().time, datetime.time(1)) def test_update_UUIDField_using_Value(self): UUID.objects.create() UUID.objects.update(uuid=Value(uuid.UUID('12345678901234567890123456789012'), output_field=UUIDField())) self.assertEqual(UUID.objects.get().uuid, uuid.UUID('12345678901234567890123456789012')) def test_deconstruct(self): value = Value('name') path, args, kwargs = value.deconstruct() self.assertEqual(path, 'django.db.models.expressions.Value') self.assertEqual(args, (value.value,)) self.assertEqual(kwargs, {}) def test_deconstruct_output_field(self): value = Value('name', output_field=CharField()) path, args, kwargs = value.deconstruct() self.assertEqual(path, 'django.db.models.expressions.Value') self.assertEqual(args, (value.value,)) self.assertEqual(len(kwargs), 1) self.assertEqual(kwargs['output_field'].deconstruct(), CharField().deconstruct()) def test_equal(self): value = Value('name') self.assertEqual(value, Value('name')) self.assertNotEqual(value, Value('username')) def test_hash(self): d = {Value('name'): 'Bob'} self.assertIn(Value('name'), d) self.assertEqual(d[Value('name')], 'Bob') def test_equal_output_field(self): value = Value('name', output_field=CharField()) same_value = Value('name', output_field=CharField()) other_value = Value('name', output_field=TimeField()) no_output_field = Value('name') self.assertEqual(value, same_value) self.assertNotEqual(value, other_value) self.assertNotEqual(value, no_output_field) def test_raise_empty_expressionlist(self): msg = 'ExpressionList requires at least one expression' with self.assertRaisesMessage(ValueError, msg): ExpressionList() class 
FieldTransformTests(TestCase): @classmethod def setUpTestData(cls): cls.sday = sday = datetime.date(2010, 6, 25) cls.stime = stime = datetime.datetime(2010, 6, 25, 12, 15, 30, 747000) cls.ex1 = Experiment.objects.create( name='Experiment 1', assigned=sday, completed=sday + datetime.timedelta(2), estimated_time=datetime.timedelta(2), start=stime, end=stime + datetime.timedelta(2), ) def test_month_aggregation(self): self.assertEqual( Experiment.objects.aggregate(month_count=Count('assigned__month')), {'month_count': 1} ) def test_transform_in_values(self): self.assertQuerysetEqual( Experiment.objects.values('assigned__month'), ["{'assigned__month': 6}"] ) def test_multiple_transforms_in_values(self): self.assertQuerysetEqual( Experiment.objects.values('end__date__month'), ["{'end__date__month': 6}"] ) class ReprTests(SimpleTestCase): def test_expressions(self): self.assertEqual( repr(Case(When(a=1))), "<Case: CASE WHEN <Q: (AND: ('a', 1))> THEN Value(None), ELSE Value(None)>" ) self.assertEqual( repr(When(Q(age__gte=18), then=Value('legal'))), "<When: WHEN <Q: (AND: ('age__gte', 18))> THEN Value(legal)>" ) self.assertEqual(repr(Col('alias', 'field')), "Col(alias, field)") self.assertEqual(repr(F('published')), "F(published)") self.assertEqual(repr(F('cost') + F('tax')), "<CombinedExpression: F(cost) + F(tax)>") self.assertEqual( repr(ExpressionWrapper(F('cost') + F('tax'), models.IntegerField())), "ExpressionWrapper(F(cost) + F(tax))" ) self.assertEqual(repr(Func('published', function='TO_CHAR')), "Func(F(published), function=TO_CHAR)") self.assertEqual(repr(OrderBy(Value(1))), 'OrderBy(Value(1), descending=False)') self.assertEqual(repr(Random()), "Random()") self.assertEqual(repr(RawSQL('table.col', [])), "RawSQL(table.col, [])") self.assertEqual(repr(Ref('sum_cost', Sum('cost'))), "Ref(sum_cost, Sum(F(cost)))") self.assertEqual(repr(Value(1)), "Value(1)") self.assertEqual( repr(ExpressionList(F('col'), F('anothercol'))), 'ExpressionList(F(col), F(anothercol))' ) self.assertEqual( repr(ExpressionList(OrderBy(F('col'), descending=False))), 'ExpressionList(OrderBy(F(col), descending=False))' ) def test_functions(self): self.assertEqual(repr(Coalesce('a', 'b')), "Coalesce(F(a), F(b))") self.assertEqual(repr(Concat('a', 'b')), "Concat(ConcatPair(F(a), F(b)))") self.assertEqual(repr(Length('a')), "Length(F(a))") self.assertEqual(repr(Lower('a')), "Lower(F(a))") self.assertEqual(repr(Substr('a', 1, 3)), "Substr(F(a), Value(1), Value(3))") self.assertEqual(repr(Upper('a')), "Upper(F(a))") def test_aggregates(self): self.assertEqual(repr(Avg('a')), "Avg(F(a))") self.assertEqual(repr(Count('a')), "Count(F(a))") self.assertEqual(repr(Count('*')), "Count('*')") self.assertEqual(repr(Max('a')), "Max(F(a))") self.assertEqual(repr(Min('a')), "Min(F(a))") self.assertEqual(repr(StdDev('a')), "StdDev(F(a), sample=False)") self.assertEqual(repr(Sum('a')), "Sum(F(a))") self.assertEqual(repr(Variance('a', sample=True)), "Variance(F(a), sample=True)") def test_distinct_aggregates(self): self.assertEqual(repr(Count('a', distinct=True)), "Count(F(a), distinct=True)") self.assertEqual(repr(Count('*', distinct=True)), "Count('*', distinct=True)") def test_filtered_aggregates(self): filter = Q(a=1) self.assertEqual(repr(Avg('a', filter=filter)), "Avg(F(a), filter=(AND: ('a', 1)))") self.assertEqual(repr(Count('a', filter=filter)), "Count(F(a), filter=(AND: ('a', 1)))") self.assertEqual(repr(Max('a', filter=filter)), "Max(F(a), filter=(AND: ('a', 1)))") self.assertEqual(repr(Min('a', filter=filter)), "Min(F(a), 
filter=(AND: ('a', 1)))") self.assertEqual(repr(StdDev('a', filter=filter)), "StdDev(F(a), filter=(AND: ('a', 1)), sample=False)") self.assertEqual(repr(Sum('a', filter=filter)), "Sum(F(a), filter=(AND: ('a', 1)))") self.assertEqual( repr(Variance('a', sample=True, filter=filter)), "Variance(F(a), filter=(AND: ('a', 1)), sample=True)" ) self.assertEqual( repr(Count('a', filter=filter, distinct=True)), "Count(F(a), distinct=True, filter=(AND: ('a', 1)))" ) class CombinableTests(SimpleTestCase): bitwise_msg = 'Use .bitand() and .bitor() for bitwise logical operations.' def test_negation(self): c = Combinable() self.assertEqual(-c, c * -1) def test_and(self): with self.assertRaisesMessage(NotImplementedError, self.bitwise_msg): Combinable() & Combinable() def test_or(self): with self.assertRaisesMessage(NotImplementedError, self.bitwise_msg): Combinable() | Combinable() def test_reversed_and(self): with self.assertRaisesMessage(NotImplementedError, self.bitwise_msg): object() & Combinable() def test_reversed_or(self): with self.assertRaisesMessage(NotImplementedError, self.bitwise_msg): object() | Combinable()
import datetime
import re
import sys
from contextlib import contextmanager
from datetime import timedelta
from unittest import SkipTest, skipIf
from xml.dom.minidom import parseString

import pytz

from django.contrib.auth.models import User
from django.core import serializers
from django.db import connection
from django.db.models import F, Max, Min
from django.http import HttpRequest
from django.template import (
    Context, RequestContext, Template, TemplateSyntaxError,
    context_processors,
)
from django.test import (
    SimpleTestCase, TestCase, TransactionTestCase, override_settings,
    skipIfDBFeature, skipUnlessDBFeature,
)
from django.test.utils import requires_tz_support
from django.urls import reverse
from django.utils import timezone

from .forms import (
    EventForm, EventLocalizedForm, EventLocalizedModelForm, EventModelForm,
    EventSplitForm,
)
from .models import (
    AllDayEvent, Event, MaybeEvent, Session, SessionEvent, Timestamp,
)

try:
    import yaml
    HAS_YAML = True
except ImportError:
    HAS_YAML = False

# These tests use the EAT (Eastern Africa Time) and ICT (Indochina Time)
# timezones, which don't have Daylight Saving Time, so we can represent them
# easily with fixed offset timezones and use them directly as tzinfo in the
# constructors.

# settings.TIME_ZONE is forced to EAT. Most tests use a variant of
# datetime.datetime(2011, 9, 1, 13, 20, 30), which translates to
# 10:20:30 in UTC and 17:20:30 in ICT.

UTC = timezone.utc
EAT = timezone.get_fixed_timezone(180)      # Africa/Nairobi
ICT = timezone.get_fixed_timezone(420)      # Asia/Bangkok


@contextmanager
def override_database_connection_timezone(timezone):
    try:
        orig_timezone = connection.settings_dict['TIME_ZONE']
        connection.settings_dict['TIME_ZONE'] = timezone
        # Clear cached properties, after first accessing them to ensure they exist.
        connection.timezone
        del connection.timezone
        connection.timezone_name
        del connection.timezone_name
        yield
    finally:
        connection.settings_dict['TIME_ZONE'] = orig_timezone
        # Clear cached properties, after first accessing them to ensure they exist.
connection.timezone del connection.timezone connection.timezone_name del connection.timezone_name @override_settings(TIME_ZONE='Africa/Nairobi', USE_TZ=False) class LegacyDatabaseTests(TestCase): def test_naive_datetime(self): dt = datetime.datetime(2011, 9, 1, 13, 20, 30) Event.objects.create(dt=dt) event = Event.objects.get() self.assertEqual(event.dt, dt) def test_naive_datetime_with_microsecond(self): dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060) Event.objects.create(dt=dt) event = Event.objects.get() self.assertEqual(event.dt, dt) @skipUnlessDBFeature('supports_timezones') def test_aware_datetime_in_local_timezone(self): dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT) Event.objects.create(dt=dt) event = Event.objects.get() self.assertIsNone(event.dt.tzinfo) # interpret the naive datetime in local time to get the correct value self.assertEqual(event.dt.replace(tzinfo=EAT), dt) @skipUnlessDBFeature('supports_timezones') def test_aware_datetime_in_local_timezone_with_microsecond(self): dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060, tzinfo=EAT) Event.objects.create(dt=dt) event = Event.objects.get() self.assertIsNone(event.dt.tzinfo) # interpret the naive datetime in local time to get the correct value self.assertEqual(event.dt.replace(tzinfo=EAT), dt) @skipUnlessDBFeature('supports_timezones') def test_aware_datetime_in_utc(self): dt = datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC) Event.objects.create(dt=dt) event = Event.objects.get() self.assertIsNone(event.dt.tzinfo) # interpret the naive datetime in local time to get the correct value self.assertEqual(event.dt.replace(tzinfo=EAT), dt) @skipUnlessDBFeature('supports_timezones') def test_aware_datetime_in_other_timezone(self): dt = datetime.datetime(2011, 9, 1, 17, 20, 30, tzinfo=ICT) Event.objects.create(dt=dt) event = Event.objects.get() self.assertIsNone(event.dt.tzinfo) # interpret the naive datetime in local time to get the correct value self.assertEqual(event.dt.replace(tzinfo=EAT), dt) @skipIfDBFeature('supports_timezones') def test_aware_datetime_unsupported(self): dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT) msg = 'backend does not support timezone-aware datetimes when USE_TZ is False.' 
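        # Backends without timezone support reject aware datetimes outright
        # when USE_TZ is False, rather than silently dropping the UTC offset.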
        with self.assertRaisesMessage(ValueError, msg):
            Event.objects.create(dt=dt)

    def test_auto_now_and_auto_now_add(self):
        now = datetime.datetime.now()
        past = now - datetime.timedelta(seconds=2)
        future = now + datetime.timedelta(seconds=2)
        Timestamp.objects.create()
        ts = Timestamp.objects.get()
        self.assertLess(past, ts.created)
        self.assertLess(past, ts.updated)
        self.assertGreater(future, ts.created)
        self.assertGreater(future, ts.updated)

    def test_query_filter(self):
        dt1 = datetime.datetime(2011, 9, 1, 12, 20, 30)
        dt2 = datetime.datetime(2011, 9, 1, 14, 20, 30)
        Event.objects.create(dt=dt1)
        Event.objects.create(dt=dt2)
        self.assertEqual(Event.objects.filter(dt__gte=dt1).count(), 2)
        self.assertEqual(Event.objects.filter(dt__gt=dt1).count(), 1)
        self.assertEqual(Event.objects.filter(dt__gte=dt2).count(), 1)
        self.assertEqual(Event.objects.filter(dt__gt=dt2).count(), 0)

    def test_query_datetime_lookups(self):
        Event.objects.create(dt=datetime.datetime(2011, 1, 1, 1, 30, 0))
        Event.objects.create(dt=datetime.datetime(2011, 1, 1, 4, 30, 0))
        self.assertEqual(Event.objects.filter(dt__year=2011).count(), 2)
        self.assertEqual(Event.objects.filter(dt__month=1).count(), 2)
        self.assertEqual(Event.objects.filter(dt__day=1).count(), 2)
        self.assertEqual(Event.objects.filter(dt__week_day=7).count(), 2)
        self.assertEqual(Event.objects.filter(dt__iso_week_day=6).count(), 2)
        self.assertEqual(Event.objects.filter(dt__hour=1).count(), 1)
        self.assertEqual(Event.objects.filter(dt__minute=30).count(), 2)
        self.assertEqual(Event.objects.filter(dt__second=0).count(), 2)

    def test_query_aggregation(self):
        # Only min and max make sense for datetimes.
        Event.objects.create(dt=datetime.datetime(2011, 9, 1, 23, 20, 20))
        Event.objects.create(dt=datetime.datetime(2011, 9, 1, 13, 20, 30))
        Event.objects.create(dt=datetime.datetime(2011, 9, 1, 3, 20, 40))
        result = Event.objects.all().aggregate(Min('dt'), Max('dt'))
        self.assertEqual(result, {
            'dt__min': datetime.datetime(2011, 9, 1, 3, 20, 40),
            'dt__max': datetime.datetime(2011, 9, 1, 23, 20, 20),
        })

    def test_query_annotation(self):
        # Only min and max make sense for datetimes.
morning = Session.objects.create(name='morning') afternoon = Session.objects.create(name='afternoon') SessionEvent.objects.create(dt=datetime.datetime(2011, 9, 1, 23, 20, 20), session=afternoon) SessionEvent.objects.create(dt=datetime.datetime(2011, 9, 1, 13, 20, 30), session=afternoon) SessionEvent.objects.create(dt=datetime.datetime(2011, 9, 1, 3, 20, 40), session=morning) morning_min_dt = datetime.datetime(2011, 9, 1, 3, 20, 40) afternoon_min_dt = datetime.datetime(2011, 9, 1, 13, 20, 30) self.assertQuerysetEqual( Session.objects.annotate(dt=Min('events__dt')).order_by('dt'), [morning_min_dt, afternoon_min_dt], transform=lambda d: d.dt, ) self.assertQuerysetEqual( Session.objects.annotate(dt=Min('events__dt')).filter(dt__lt=afternoon_min_dt), [morning_min_dt], transform=lambda d: d.dt, ) self.assertQuerysetEqual( Session.objects.annotate(dt=Min('events__dt')).filter(dt__gte=afternoon_min_dt), [afternoon_min_dt], transform=lambda d: d.dt, ) def test_query_datetimes(self): Event.objects.create(dt=datetime.datetime(2011, 1, 1, 1, 30, 0)) Event.objects.create(dt=datetime.datetime(2011, 1, 1, 4, 30, 0)) self.assertSequenceEqual(Event.objects.datetimes('dt', 'year'), [datetime.datetime(2011, 1, 1, 0, 0, 0)]) self.assertSequenceEqual(Event.objects.datetimes('dt', 'month'), [datetime.datetime(2011, 1, 1, 0, 0, 0)]) self.assertSequenceEqual(Event.objects.datetimes('dt', 'day'), [datetime.datetime(2011, 1, 1, 0, 0, 0)]) self.assertSequenceEqual( Event.objects.datetimes('dt', 'hour'), [datetime.datetime(2011, 1, 1, 1, 0, 0), datetime.datetime(2011, 1, 1, 4, 0, 0)] ) self.assertSequenceEqual( Event.objects.datetimes('dt', 'minute'), [datetime.datetime(2011, 1, 1, 1, 30, 0), datetime.datetime(2011, 1, 1, 4, 30, 0)] ) self.assertSequenceEqual( Event.objects.datetimes('dt', 'second'), [datetime.datetime(2011, 1, 1, 1, 30, 0), datetime.datetime(2011, 1, 1, 4, 30, 0)] ) def test_raw_sql(self): # Regression test for #17755 dt = datetime.datetime(2011, 9, 1, 13, 20, 30) event = Event.objects.create(dt=dt) self.assertEqual(list(Event.objects.raw('SELECT * FROM timezones_event WHERE dt = %s', [dt])), [event]) def test_cursor_execute_accepts_naive_datetime(self): dt = datetime.datetime(2011, 9, 1, 13, 20, 30) with connection.cursor() as cursor: cursor.execute('INSERT INTO timezones_event (dt) VALUES (%s)', [dt]) event = Event.objects.get() self.assertEqual(event.dt, dt) def test_cursor_execute_returns_naive_datetime(self): dt = datetime.datetime(2011, 9, 1, 13, 20, 30) Event.objects.create(dt=dt) with connection.cursor() as cursor: cursor.execute('SELECT dt FROM timezones_event WHERE dt = %s', [dt]) self.assertEqual(cursor.fetchall()[0][0], dt) def test_filter_date_field_with_aware_datetime(self): # Regression test for #17742 day = datetime.date(2011, 9, 1) AllDayEvent.objects.create(day=day) # This is 2011-09-02T01:30:00+03:00 in EAT dt = datetime.datetime(2011, 9, 1, 22, 30, 0, tzinfo=UTC) self.assertTrue(AllDayEvent.objects.filter(day__gte=dt).exists()) @override_settings(TIME_ZONE='Africa/Nairobi', USE_TZ=True) class NewDatabaseTests(TestCase): naive_warning = 'DateTimeField Event.dt received a naive datetime' @requires_tz_support def test_naive_datetime(self): dt = datetime.datetime(2011, 9, 1, 13, 20, 30) with self.assertWarnsMessage(RuntimeWarning, self.naive_warning): Event.objects.create(dt=dt) event = Event.objects.get() # naive datetimes are interpreted in local time self.assertEqual(event.dt, dt.replace(tzinfo=EAT)) @requires_tz_support def test_datetime_from_date(self): dt = datetime.date(2011, 
        9, 1)
        with self.assertWarnsMessage(RuntimeWarning, self.naive_warning):
            Event.objects.create(dt=dt)
        event = Event.objects.get()
        self.assertEqual(event.dt, datetime.datetime(2011, 9, 1, tzinfo=EAT))

    @requires_tz_support
    def test_naive_datetime_with_microsecond(self):
        dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060)
        with self.assertWarnsMessage(RuntimeWarning, self.naive_warning):
            Event.objects.create(dt=dt)
        event = Event.objects.get()
        # naive datetimes are interpreted in local time
        self.assertEqual(event.dt, dt.replace(tzinfo=EAT))

    def test_aware_datetime_in_local_timezone(self):
        dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
        Event.objects.create(dt=dt)
        event = Event.objects.get()
        self.assertEqual(event.dt, dt)

    def test_aware_datetime_in_local_timezone_with_microsecond(self):
        dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060, tzinfo=EAT)
        Event.objects.create(dt=dt)
        event = Event.objects.get()
        self.assertEqual(event.dt, dt)

    def test_aware_datetime_in_utc(self):
        dt = datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC)
        Event.objects.create(dt=dt)
        event = Event.objects.get()
        self.assertEqual(event.dt, dt)

    def test_aware_datetime_in_other_timezone(self):
        dt = datetime.datetime(2011, 9, 1, 17, 20, 30, tzinfo=ICT)
        Event.objects.create(dt=dt)
        event = Event.objects.get()
        self.assertEqual(event.dt, dt)

    def test_auto_now_and_auto_now_add(self):
        now = timezone.now()
        past = now - datetime.timedelta(seconds=2)
        future = now + datetime.timedelta(seconds=2)
        Timestamp.objects.create()
        ts = Timestamp.objects.get()
        self.assertLess(past, ts.created)
        self.assertLess(past, ts.updated)
        self.assertGreater(future, ts.created)
        self.assertGreater(future, ts.updated)

    def test_query_filter(self):
        dt1 = datetime.datetime(2011, 9, 1, 12, 20, 30, tzinfo=EAT)
        dt2 = datetime.datetime(2011, 9, 1, 14, 20, 30, tzinfo=EAT)
        Event.objects.create(dt=dt1)
        Event.objects.create(dt=dt2)
        self.assertEqual(Event.objects.filter(dt__gte=dt1).count(), 2)
        self.assertEqual(Event.objects.filter(dt__gt=dt1).count(), 1)
        self.assertEqual(Event.objects.filter(dt__gte=dt2).count(), 1)
        self.assertEqual(Event.objects.filter(dt__gt=dt2).count(), 0)

    def test_query_filter_with_pytz_timezones(self):
        tz = pytz.timezone('Europe/Paris')
        dt = datetime.datetime(2011, 9, 1, 12, 20, 30, tzinfo=tz)
        Event.objects.create(dt=dt)
        next = dt + datetime.timedelta(seconds=3)
        prev = dt - datetime.timedelta(seconds=3)
        self.assertEqual(Event.objects.filter(dt__exact=dt).count(), 1)
        self.assertEqual(Event.objects.filter(dt__exact=next).count(), 0)
        self.assertEqual(Event.objects.filter(dt__in=(prev, next)).count(), 0)
        self.assertEqual(Event.objects.filter(dt__in=(prev, dt, next)).count(), 1)
        self.assertEqual(Event.objects.filter(dt__range=(prev, next)).count(), 1)

    def test_query_convert_timezones(self):
        # Connection timezone is equal to the current timezone, datetime
        # shouldn't be converted.
        with override_database_connection_timezone('Africa/Nairobi'):
            event_datetime = datetime.datetime(2016, 1, 2, 23, 10, 11, 123, tzinfo=EAT)
            event = Event.objects.create(dt=event_datetime)
            self.assertEqual(Event.objects.filter(dt__date=event_datetime.date()).first(), event)
        # Connection timezone is not equal to the current timezone, datetime
        # should be converted (-4h).
with override_database_connection_timezone('Asia/Bangkok'): event_datetime = datetime.datetime(2016, 1, 2, 3, 10, 11, tzinfo=ICT) event = Event.objects.create(dt=event_datetime) self.assertEqual(Event.objects.filter(dt__date=datetime.date(2016, 1, 1)).first(), event) @requires_tz_support def test_query_filter_with_naive_datetime(self): dt = datetime.datetime(2011, 9, 1, 12, 20, 30, tzinfo=EAT) Event.objects.create(dt=dt) dt = dt.replace(tzinfo=None) # naive datetimes are interpreted in local time with self.assertWarnsMessage(RuntimeWarning, self.naive_warning): self.assertEqual(Event.objects.filter(dt__exact=dt).count(), 1) with self.assertWarnsMessage(RuntimeWarning, self.naive_warning): self.assertEqual(Event.objects.filter(dt__lte=dt).count(), 1) with self.assertWarnsMessage(RuntimeWarning, self.naive_warning): self.assertEqual(Event.objects.filter(dt__gt=dt).count(), 0) @skipUnlessDBFeature('has_zoneinfo_database') def test_query_datetime_lookups(self): Event.objects.create(dt=datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=EAT)) Event.objects.create(dt=datetime.datetime(2011, 1, 1, 4, 30, 0, tzinfo=EAT)) self.assertEqual(Event.objects.filter(dt__year=2011).count(), 2) self.assertEqual(Event.objects.filter(dt__month=1).count(), 2) self.assertEqual(Event.objects.filter(dt__day=1).count(), 2) self.assertEqual(Event.objects.filter(dt__week_day=7).count(), 2) self.assertEqual(Event.objects.filter(dt__iso_week_day=6).count(), 2) self.assertEqual(Event.objects.filter(dt__hour=1).count(), 1) self.assertEqual(Event.objects.filter(dt__minute=30).count(), 2) self.assertEqual(Event.objects.filter(dt__second=0).count(), 2) @skipUnlessDBFeature('has_zoneinfo_database') def test_query_datetime_lookups_in_other_timezone(self): Event.objects.create(dt=datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=EAT)) Event.objects.create(dt=datetime.datetime(2011, 1, 1, 4, 30, 0, tzinfo=EAT)) with timezone.override(UTC): # These two dates fall in the same day in EAT, but in different days, # years and months in UTC. self.assertEqual(Event.objects.filter(dt__year=2011).count(), 1) self.assertEqual(Event.objects.filter(dt__month=1).count(), 1) self.assertEqual(Event.objects.filter(dt__day=1).count(), 1) self.assertEqual(Event.objects.filter(dt__week_day=7).count(), 1) self.assertEqual(Event.objects.filter(dt__iso_week_day=6).count(), 1) self.assertEqual(Event.objects.filter(dt__hour=22).count(), 1) self.assertEqual(Event.objects.filter(dt__minute=30).count(), 2) self.assertEqual(Event.objects.filter(dt__second=0).count(), 2) def test_query_aggregation(self): # Only min and max make sense for datetimes. Event.objects.create(dt=datetime.datetime(2011, 9, 1, 23, 20, 20, tzinfo=EAT)) Event.objects.create(dt=datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)) Event.objects.create(dt=datetime.datetime(2011, 9, 1, 3, 20, 40, tzinfo=EAT)) result = Event.objects.all().aggregate(Min('dt'), Max('dt')) self.assertEqual(result, { 'dt__min': datetime.datetime(2011, 9, 1, 3, 20, 40, tzinfo=EAT), 'dt__max': datetime.datetime(2011, 9, 1, 23, 20, 20, tzinfo=EAT), }) def test_query_annotation(self): # Only min and max make sense for datetimes. 
morning = Session.objects.create(name='morning') afternoon = Session.objects.create(name='afternoon') SessionEvent.objects.create(dt=datetime.datetime(2011, 9, 1, 23, 20, 20, tzinfo=EAT), session=afternoon) SessionEvent.objects.create(dt=datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT), session=afternoon) SessionEvent.objects.create(dt=datetime.datetime(2011, 9, 1, 3, 20, 40, tzinfo=EAT), session=morning) morning_min_dt = datetime.datetime(2011, 9, 1, 3, 20, 40, tzinfo=EAT) afternoon_min_dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT) self.assertQuerysetEqual( Session.objects.annotate(dt=Min('events__dt')).order_by('dt'), [morning_min_dt, afternoon_min_dt], transform=lambda d: d.dt, ) self.assertQuerysetEqual( Session.objects.annotate(dt=Min('events__dt')).filter(dt__lt=afternoon_min_dt), [morning_min_dt], transform=lambda d: d.dt, ) self.assertQuerysetEqual( Session.objects.annotate(dt=Min('events__dt')).filter(dt__gte=afternoon_min_dt), [afternoon_min_dt], transform=lambda d: d.dt, ) @skipUnlessDBFeature('has_zoneinfo_database') def test_query_datetimes(self): Event.objects.create(dt=datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=EAT)) Event.objects.create(dt=datetime.datetime(2011, 1, 1, 4, 30, 0, tzinfo=EAT)) self.assertSequenceEqual( Event.objects.datetimes('dt', 'year'), [datetime.datetime(2011, 1, 1, 0, 0, 0, tzinfo=EAT)] ) self.assertSequenceEqual( Event.objects.datetimes('dt', 'month'), [datetime.datetime(2011, 1, 1, 0, 0, 0, tzinfo=EAT)] ) self.assertSequenceEqual( Event.objects.datetimes('dt', 'day'), [datetime.datetime(2011, 1, 1, 0, 0, 0, tzinfo=EAT)] ) self.assertSequenceEqual( Event.objects.datetimes('dt', 'hour'), [datetime.datetime(2011, 1, 1, 1, 0, 0, tzinfo=EAT), datetime.datetime(2011, 1, 1, 4, 0, 0, tzinfo=EAT)] ) self.assertSequenceEqual( Event.objects.datetimes('dt', 'minute'), [datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=EAT), datetime.datetime(2011, 1, 1, 4, 30, 0, tzinfo=EAT)] ) self.assertSequenceEqual( Event.objects.datetimes('dt', 'second'), [datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=EAT), datetime.datetime(2011, 1, 1, 4, 30, 0, tzinfo=EAT)] ) @skipUnlessDBFeature('has_zoneinfo_database') def test_query_datetimes_in_other_timezone(self): Event.objects.create(dt=datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=EAT)) Event.objects.create(dt=datetime.datetime(2011, 1, 1, 4, 30, 0, tzinfo=EAT)) with timezone.override(UTC): self.assertSequenceEqual( Event.objects.datetimes('dt', 'year'), [datetime.datetime(2010, 1, 1, 0, 0, 0, tzinfo=UTC), datetime.datetime(2011, 1, 1, 0, 0, 0, tzinfo=UTC)] ) self.assertSequenceEqual( Event.objects.datetimes('dt', 'month'), [datetime.datetime(2010, 12, 1, 0, 0, 0, tzinfo=UTC), datetime.datetime(2011, 1, 1, 0, 0, 0, tzinfo=UTC)] ) self.assertSequenceEqual( Event.objects.datetimes('dt', 'day'), [datetime.datetime(2010, 12, 31, 0, 0, 0, tzinfo=UTC), datetime.datetime(2011, 1, 1, 0, 0, 0, tzinfo=UTC)] ) self.assertSequenceEqual( Event.objects.datetimes('dt', 'hour'), [datetime.datetime(2010, 12, 31, 22, 0, 0, tzinfo=UTC), datetime.datetime(2011, 1, 1, 1, 0, 0, tzinfo=UTC)] ) self.assertSequenceEqual( Event.objects.datetimes('dt', 'minute'), [datetime.datetime(2010, 12, 31, 22, 30, 0, tzinfo=UTC), datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=UTC)] ) self.assertSequenceEqual( Event.objects.datetimes('dt', 'second'), [datetime.datetime(2010, 12, 31, 22, 30, 0, tzinfo=UTC), datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=UTC)] ) def test_raw_sql(self): # Regression test for #17755 dt = datetime.datetime(2011, 9, 1, 
13, 20, 30, tzinfo=EAT) event = Event.objects.create(dt=dt) self.assertSequenceEqual(list(Event.objects.raw('SELECT * FROM timezones_event WHERE dt = %s', [dt])), [event]) @skipUnlessDBFeature('supports_timezones') def test_cursor_execute_accepts_aware_datetime(self): dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT) with connection.cursor() as cursor: cursor.execute('INSERT INTO timezones_event (dt) VALUES (%s)', [dt]) event = Event.objects.get() self.assertEqual(event.dt, dt) @skipIfDBFeature('supports_timezones') def test_cursor_execute_accepts_naive_datetime(self): dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT) utc_naive_dt = timezone.make_naive(dt, timezone.utc) with connection.cursor() as cursor: cursor.execute('INSERT INTO timezones_event (dt) VALUES (%s)', [utc_naive_dt]) event = Event.objects.get() self.assertEqual(event.dt, dt) @skipUnlessDBFeature('supports_timezones') def test_cursor_execute_returns_aware_datetime(self): dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT) Event.objects.create(dt=dt) with connection.cursor() as cursor: cursor.execute('SELECT dt FROM timezones_event WHERE dt = %s', [dt]) self.assertEqual(cursor.fetchall()[0][0], dt) @skipIfDBFeature('supports_timezones') def test_cursor_execute_returns_naive_datetime(self): dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT) utc_naive_dt = timezone.make_naive(dt, timezone.utc) Event.objects.create(dt=dt) with connection.cursor() as cursor: cursor.execute('SELECT dt FROM timezones_event WHERE dt = %s', [utc_naive_dt]) self.assertEqual(cursor.fetchall()[0][0], utc_naive_dt) @skipUnlessDBFeature('supports_timezones') def test_cursor_explicit_time_zone(self): with override_database_connection_timezone('Europe/Paris'): with connection.cursor() as cursor: cursor.execute('SELECT CURRENT_TIMESTAMP') now = cursor.fetchone()[0] self.assertEqual(now.tzinfo.zone, 'Europe/Paris') @requires_tz_support def test_filter_date_field_with_aware_datetime(self): # Regression test for #17742 day = datetime.date(2011, 9, 1) AllDayEvent.objects.create(day=day) # This is 2011-09-02T01:30:00+03:00 in EAT dt = datetime.datetime(2011, 9, 1, 22, 30, 0, tzinfo=UTC) self.assertFalse(AllDayEvent.objects.filter(day__gte=dt).exists()) def test_null_datetime(self): # Regression test for #17294 e = MaybeEvent.objects.create() self.assertIsNone(e.dt) def test_update_with_timedelta(self): initial_dt = timezone.now().replace(microsecond=0) event = Event.objects.create(dt=initial_dt) Event.objects.update(dt=F('dt') + timedelta(hours=2)) event.refresh_from_db() self.assertEqual(event.dt, initial_dt + timedelta(hours=2)) @override_settings(TIME_ZONE='Africa/Nairobi', USE_TZ=True) class ForcedTimeZoneDatabaseTests(TransactionTestCase): """ Test the TIME_ZONE database configuration parameter. Since this involves reading and writing to the same database through two connections, this is a TransactionTestCase. """ available_apps = ['timezones'] @classmethod def setUpClass(cls): # @skipIfDBFeature and @skipUnlessDBFeature cannot be chained. The # outermost takes precedence. Handle skipping manually instead. 
if connection.features.supports_timezones: raise SkipTest("Database has feature(s) supports_timezones") if not connection.features.test_db_allows_multiple_connections: raise SkipTest("Database doesn't support feature(s): test_db_allows_multiple_connections") super().setUpClass() def test_read_datetime(self): fake_dt = datetime.datetime(2011, 9, 1, 17, 20, 30, tzinfo=UTC) Event.objects.create(dt=fake_dt) with override_database_connection_timezone('Asia/Bangkok'): event = Event.objects.get() dt = datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC) self.assertEqual(event.dt, dt) def test_write_datetime(self): dt = datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC) with override_database_connection_timezone('Asia/Bangkok'): Event.objects.create(dt=dt) event = Event.objects.get() fake_dt = datetime.datetime(2011, 9, 1, 17, 20, 30, tzinfo=UTC) self.assertEqual(event.dt, fake_dt) @override_settings(TIME_ZONE='Africa/Nairobi') class SerializationTests(SimpleTestCase): # Backend-specific notes: # - JSON supports only milliseconds, microseconds will be truncated. # - PyYAML dumps the UTC offset correctly for timezone-aware datetimes. # When PyYAML < 5.3 loads this representation, it subtracts the offset # and returns a naive datetime object in UTC. PyYAML 5.3+ loads timezones # correctly. # Tests are adapted to take these quirks into account. def assert_python_contains_datetime(self, objects, dt): self.assertEqual(objects[0]['fields']['dt'], dt) def assert_json_contains_datetime(self, json, dt): self.assertIn('"fields": {"dt": "%s"}' % dt, json) def assert_xml_contains_datetime(self, xml, dt): field = parseString(xml).getElementsByTagName('field')[0] self.assertXMLEqual(field.childNodes[0].wholeText, dt) def assert_yaml_contains_datetime(self, yaml, dt): # Depending on the yaml dumper, '!timestamp' might be absent self.assertRegex(yaml, r"\n fields: {dt: !(!timestamp)? 
'%s'}" % re.escape(dt)) def test_naive_datetime(self): dt = datetime.datetime(2011, 9, 1, 13, 20, 30) data = serializers.serialize('python', [Event(dt=dt)]) self.assert_python_contains_datetime(data, dt) obj = next(serializers.deserialize('python', data)).object self.assertEqual(obj.dt, dt) data = serializers.serialize('json', [Event(dt=dt)]) self.assert_json_contains_datetime(data, "2011-09-01T13:20:30") obj = next(serializers.deserialize('json', data)).object self.assertEqual(obj.dt, dt) data = serializers.serialize('xml', [Event(dt=dt)]) self.assert_xml_contains_datetime(data, "2011-09-01T13:20:30") obj = next(serializers.deserialize('xml', data)).object self.assertEqual(obj.dt, dt) if not isinstance(serializers.get_serializer('yaml'), serializers.BadSerializer): data = serializers.serialize('yaml', [Event(dt=dt)], default_flow_style=None) self.assert_yaml_contains_datetime(data, "2011-09-01 13:20:30") obj = next(serializers.deserialize('yaml', data)).object self.assertEqual(obj.dt, dt) def test_naive_datetime_with_microsecond(self): dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060) data = serializers.serialize('python', [Event(dt=dt)]) self.assert_python_contains_datetime(data, dt) obj = next(serializers.deserialize('python', data)).object self.assertEqual(obj.dt, dt) data = serializers.serialize('json', [Event(dt=dt)]) self.assert_json_contains_datetime(data, "2011-09-01T13:20:30.405") obj = next(serializers.deserialize('json', data)).object self.assertEqual(obj.dt, dt.replace(microsecond=405000)) data = serializers.serialize('xml', [Event(dt=dt)]) self.assert_xml_contains_datetime(data, "2011-09-01T13:20:30.405060") obj = next(serializers.deserialize('xml', data)).object self.assertEqual(obj.dt, dt) if not isinstance(serializers.get_serializer('yaml'), serializers.BadSerializer): data = serializers.serialize('yaml', [Event(dt=dt)], default_flow_style=None) self.assert_yaml_contains_datetime(data, "2011-09-01 13:20:30.405060") obj = next(serializers.deserialize('yaml', data)).object self.assertEqual(obj.dt, dt) def test_aware_datetime_with_microsecond(self): dt = datetime.datetime(2011, 9, 1, 17, 20, 30, 405060, tzinfo=ICT) data = serializers.serialize('python', [Event(dt=dt)]) self.assert_python_contains_datetime(data, dt) obj = next(serializers.deserialize('python', data)).object self.assertEqual(obj.dt, dt) data = serializers.serialize('json', [Event(dt=dt)]) self.assert_json_contains_datetime(data, "2011-09-01T17:20:30.405+07:00") obj = next(serializers.deserialize('json', data)).object self.assertEqual(obj.dt, dt.replace(microsecond=405000)) data = serializers.serialize('xml', [Event(dt=dt)]) self.assert_xml_contains_datetime(data, "2011-09-01T17:20:30.405060+07:00") obj = next(serializers.deserialize('xml', data)).object self.assertEqual(obj.dt, dt) if not isinstance(serializers.get_serializer('yaml'), serializers.BadSerializer): data = serializers.serialize('yaml', [Event(dt=dt)], default_flow_style=None) self.assert_yaml_contains_datetime(data, "2011-09-01 17:20:30.405060+07:00") obj = next(serializers.deserialize('yaml', data)).object if HAS_YAML and yaml.__version__ < '5.3': self.assertEqual(obj.dt.replace(tzinfo=UTC), dt) else: self.assertEqual(obj.dt, dt) def test_aware_datetime_in_utc(self): dt = datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC) data = serializers.serialize('python', [Event(dt=dt)]) self.assert_python_contains_datetime(data, dt) obj = next(serializers.deserialize('python', data)).object self.assertEqual(obj.dt, dt) data = 
serializers.serialize('json', [Event(dt=dt)]) self.assert_json_contains_datetime(data, "2011-09-01T10:20:30Z") obj = next(serializers.deserialize('json', data)).object self.assertEqual(obj.dt, dt) data = serializers.serialize('xml', [Event(dt=dt)]) self.assert_xml_contains_datetime(data, "2011-09-01T10:20:30+00:00") obj = next(serializers.deserialize('xml', data)).object self.assertEqual(obj.dt, dt) if not isinstance(serializers.get_serializer('yaml'), serializers.BadSerializer): data = serializers.serialize('yaml', [Event(dt=dt)], default_flow_style=None) self.assert_yaml_contains_datetime(data, "2011-09-01 10:20:30+00:00") obj = next(serializers.deserialize('yaml', data)).object self.assertEqual(obj.dt.replace(tzinfo=UTC), dt) def test_aware_datetime_in_local_timezone(self): dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT) data = serializers.serialize('python', [Event(dt=dt)]) self.assert_python_contains_datetime(data, dt) obj = next(serializers.deserialize('python', data)).object self.assertEqual(obj.dt, dt) data = serializers.serialize('json', [Event(dt=dt)]) self.assert_json_contains_datetime(data, "2011-09-01T13:20:30+03:00") obj = next(serializers.deserialize('json', data)).object self.assertEqual(obj.dt, dt) data = serializers.serialize('xml', [Event(dt=dt)]) self.assert_xml_contains_datetime(data, "2011-09-01T13:20:30+03:00") obj = next(serializers.deserialize('xml', data)).object self.assertEqual(obj.dt, dt) if not isinstance(serializers.get_serializer('yaml'), serializers.BadSerializer): data = serializers.serialize('yaml', [Event(dt=dt)], default_flow_style=None) self.assert_yaml_contains_datetime(data, "2011-09-01 13:20:30+03:00") obj = next(serializers.deserialize('yaml', data)).object if HAS_YAML and yaml.__version__ < '5.3': self.assertEqual(obj.dt.replace(tzinfo=UTC), dt) else: self.assertEqual(obj.dt, dt) def test_aware_datetime_in_other_timezone(self): dt = datetime.datetime(2011, 9, 1, 17, 20, 30, tzinfo=ICT) data = serializers.serialize('python', [Event(dt=dt)]) self.assert_python_contains_datetime(data, dt) obj = next(serializers.deserialize('python', data)).object self.assertEqual(obj.dt, dt) data = serializers.serialize('json', [Event(dt=dt)]) self.assert_json_contains_datetime(data, "2011-09-01T17:20:30+07:00") obj = next(serializers.deserialize('json', data)).object self.assertEqual(obj.dt, dt) data = serializers.serialize('xml', [Event(dt=dt)]) self.assert_xml_contains_datetime(data, "2011-09-01T17:20:30+07:00") obj = next(serializers.deserialize('xml', data)).object self.assertEqual(obj.dt, dt) if not isinstance(serializers.get_serializer('yaml'), serializers.BadSerializer): data = serializers.serialize('yaml', [Event(dt=dt)], default_flow_style=None) self.assert_yaml_contains_datetime(data, "2011-09-01 17:20:30+07:00") obj = next(serializers.deserialize('yaml', data)).object if HAS_YAML and yaml.__version__ < '5.3': self.assertEqual(obj.dt.replace(tzinfo=UTC), dt) else: self.assertEqual(obj.dt, dt) @override_settings(DATETIME_FORMAT='c', TIME_ZONE='Africa/Nairobi', USE_L10N=False, USE_TZ=True) class TemplateTests(SimpleTestCase): @requires_tz_support def test_localtime_templatetag_and_filters(self): """ Test the {% localtime %} templatetag and related filters. 
""" datetimes = { 'utc': datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC), 'eat': datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT), 'ict': datetime.datetime(2011, 9, 1, 17, 20, 30, tzinfo=ICT), 'naive': datetime.datetime(2011, 9, 1, 13, 20, 30), } templates = { 'notag': Template("{% load tz %}{{ dt }}|{{ dt|localtime }}|{{ dt|utc }}|{{ dt|timezone:ICT }}"), 'noarg': Template( "{% load tz %}{% localtime %}{{ dt }}|{{ dt|localtime }}|" "{{ dt|utc }}|{{ dt|timezone:ICT }}{% endlocaltime %}" ), 'on': Template( "{% load tz %}{% localtime on %}{{ dt }}|{{ dt|localtime }}|" "{{ dt|utc }}|{{ dt|timezone:ICT }}{% endlocaltime %}" ), 'off': Template( "{% load tz %}{% localtime off %}{{ dt }}|{{ dt|localtime }}|" "{{ dt|utc }}|{{ dt|timezone:ICT }}{% endlocaltime %}" ), } # Transform a list of keys in 'datetimes' to the expected template # output. This makes the definition of 'results' more readable. def t(*result): return '|'.join(datetimes[key].isoformat() for key in result) # Results for USE_TZ = True results = { 'utc': { 'notag': t('eat', 'eat', 'utc', 'ict'), 'noarg': t('eat', 'eat', 'utc', 'ict'), 'on': t('eat', 'eat', 'utc', 'ict'), 'off': t('utc', 'eat', 'utc', 'ict'), }, 'eat': { 'notag': t('eat', 'eat', 'utc', 'ict'), 'noarg': t('eat', 'eat', 'utc', 'ict'), 'on': t('eat', 'eat', 'utc', 'ict'), 'off': t('eat', 'eat', 'utc', 'ict'), }, 'ict': { 'notag': t('eat', 'eat', 'utc', 'ict'), 'noarg': t('eat', 'eat', 'utc', 'ict'), 'on': t('eat', 'eat', 'utc', 'ict'), 'off': t('ict', 'eat', 'utc', 'ict'), }, 'naive': { 'notag': t('naive', 'eat', 'utc', 'ict'), 'noarg': t('naive', 'eat', 'utc', 'ict'), 'on': t('naive', 'eat', 'utc', 'ict'), 'off': t('naive', 'eat', 'utc', 'ict'), } } for k1, dt in datetimes.items(): for k2, tpl in templates.items(): ctx = Context({'dt': dt, 'ICT': ICT}) actual = tpl.render(ctx) expected = results[k1][k2] self.assertEqual(actual, expected, '%s / %s: %r != %r' % (k1, k2, actual, expected)) # Changes for USE_TZ = False results['utc']['notag'] = t('utc', 'eat', 'utc', 'ict') results['ict']['notag'] = t('ict', 'eat', 'utc', 'ict') with self.settings(USE_TZ=False): for k1, dt in datetimes.items(): for k2, tpl in templates.items(): ctx = Context({'dt': dt, 'ICT': ICT}) actual = tpl.render(ctx) expected = results[k1][k2] self.assertEqual(actual, expected, '%s / %s: %r != %r' % (k1, k2, actual, expected)) def test_localtime_filters_with_pytz(self): """ Test the |localtime, |utc, and |timezone filters with pytz. 
""" # Use a pytz timezone as local time tpl = Template("{% load tz %}{{ dt|localtime }}|{{ dt|utc }}") ctx = Context({'dt': datetime.datetime(2011, 9, 1, 12, 20, 30)}) with self.settings(TIME_ZONE='Europe/Paris'): self.assertEqual(tpl.render(ctx), "2011-09-01T12:20:30+02:00|2011-09-01T10:20:30+00:00") # Use a pytz timezone as argument tpl = Template("{% load tz %}{{ dt|timezone:tz }}") ctx = Context({ 'dt': datetime.datetime(2011, 9, 1, 13, 20, 30), 'tz': pytz.timezone('Europe/Paris'), }) self.assertEqual(tpl.render(ctx), "2011-09-01T12:20:30+02:00") # Use a pytz timezone name as argument tpl = Template("{% load tz %}{{ dt|timezone:'Europe/Paris' }}") ctx = Context({ 'dt': datetime.datetime(2011, 9, 1, 13, 20, 30), 'tz': pytz.timezone('Europe/Paris'), }) self.assertEqual(tpl.render(ctx), "2011-09-01T12:20:30+02:00") def test_localtime_templatetag_invalid_argument(self): with self.assertRaises(TemplateSyntaxError): Template("{% load tz %}{% localtime foo %}{% endlocaltime %}").render() def test_localtime_filters_do_not_raise_exceptions(self): """ Test the |localtime, |utc, and |timezone filters on bad inputs. """ tpl = Template("{% load tz %}{{ dt }}|{{ dt|localtime }}|{{ dt|utc }}|{{ dt|timezone:tz }}") with self.settings(USE_TZ=True): # bad datetime value ctx = Context({'dt': None, 'tz': ICT}) self.assertEqual(tpl.render(ctx), "None|||") ctx = Context({'dt': 'not a date', 'tz': ICT}) self.assertEqual(tpl.render(ctx), "not a date|||") # bad timezone value tpl = Template("{% load tz %}{{ dt|timezone:tz }}") ctx = Context({'dt': datetime.datetime(2011, 9, 1, 13, 20, 30), 'tz': None}) self.assertEqual(tpl.render(ctx), "") ctx = Context({'dt': datetime.datetime(2011, 9, 1, 13, 20, 30), 'tz': 'not a tz'}) self.assertEqual(tpl.render(ctx), "") @requires_tz_support def test_timezone_templatetag(self): """ Test the {% timezone %} templatetag. """ tpl = Template( "{% load tz %}" "{{ dt }}|" "{% timezone tz1 %}" "{{ dt }}|" "{% timezone tz2 %}" "{{ dt }}" "{% endtimezone %}" "{% endtimezone %}" ) ctx = Context({ 'dt': datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC), 'tz1': ICT, 'tz2': None, }) self.assertEqual( tpl.render(ctx), "2011-09-01T13:20:30+03:00|2011-09-01T17:20:30+07:00|2011-09-01T13:20:30+03:00" ) def test_timezone_templatetag_with_pytz(self): """ Test the {% timezone %} templatetag with pytz. """ tpl = Template("{% load tz %}{% timezone tz %}{{ dt }}{% endtimezone %}") # Use a pytz timezone as argument ctx = Context({ 'dt': datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT), 'tz': pytz.timezone('Europe/Paris'), }) self.assertEqual(tpl.render(ctx), "2011-09-01T12:20:30+02:00") # Use a pytz timezone name as argument ctx = Context({ 'dt': datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT), 'tz': 'Europe/Paris', }) self.assertEqual(tpl.render(ctx), "2011-09-01T12:20:30+02:00") def test_timezone_templatetag_invalid_argument(self): with self.assertRaises(TemplateSyntaxError): Template("{% load tz %}{% timezone %}{% endtimezone %}").render() with self.assertRaises(pytz.UnknownTimeZoneError): Template("{% load tz %}{% timezone tz %}{% endtimezone %}").render(Context({'tz': 'foobar'})) @skipIf(sys.platform == 'win32', "Windows uses non-standard time zone names") def test_get_current_timezone_templatetag(self): """ Test the {% get_current_timezone %} templatetag. 
""" tpl = Template("{% load tz %}{% get_current_timezone as time_zone %}{{ time_zone }}") self.assertEqual(tpl.render(Context()), "Africa/Nairobi") with timezone.override(UTC): self.assertEqual(tpl.render(Context()), "UTC") tpl = Template( "{% load tz %}{% timezone tz %}{% get_current_timezone as time_zone %}" "{% endtimezone %}{{ time_zone }}" ) self.assertEqual(tpl.render(Context({'tz': ICT})), "+0700") with timezone.override(UTC): self.assertEqual(tpl.render(Context({'tz': ICT})), "+0700") def test_get_current_timezone_templatetag_with_pytz(self): """ Test the {% get_current_timezone %} templatetag with pytz. """ tpl = Template("{% load tz %}{% get_current_timezone as time_zone %}{{ time_zone }}") with timezone.override(pytz.timezone('Europe/Paris')): self.assertEqual(tpl.render(Context()), "Europe/Paris") tpl = Template( "{% load tz %}{% timezone 'Europe/Paris' %}" "{% get_current_timezone as time_zone %}{% endtimezone %}" "{{ time_zone }}" ) self.assertEqual(tpl.render(Context()), "Europe/Paris") def test_get_current_timezone_templatetag_invalid_argument(self): msg = "'get_current_timezone' requires 'as variable' (got ['get_current_timezone'])" with self.assertRaisesMessage(TemplateSyntaxError, msg): Template("{% load tz %}{% get_current_timezone %}").render() @skipIf(sys.platform == 'win32', "Windows uses non-standard time zone names") def test_tz_template_context_processor(self): """ Test the django.template.context_processors.tz template context processor. """ tpl = Template("{{ TIME_ZONE }}") context = Context() self.assertEqual(tpl.render(context), "") request_context = RequestContext(HttpRequest(), processors=[context_processors.tz]) self.assertEqual(tpl.render(request_context), "Africa/Nairobi") @requires_tz_support def test_date_and_time_template_filters(self): tpl = Template("{{ dt|date:'Y-m-d' }} at {{ dt|time:'H:i:s' }}") ctx = Context({'dt': datetime.datetime(2011, 9, 1, 20, 20, 20, tzinfo=UTC)}) self.assertEqual(tpl.render(ctx), "2011-09-01 at 23:20:20") with timezone.override(ICT): self.assertEqual(tpl.render(ctx), "2011-09-02 at 03:20:20") def test_date_and_time_template_filters_honor_localtime(self): tpl = Template( "{% load tz %}{% localtime off %}{{ dt|date:'Y-m-d' }} at " "{{ dt|time:'H:i:s' }}{% endlocaltime %}" ) ctx = Context({'dt': datetime.datetime(2011, 9, 1, 20, 20, 20, tzinfo=UTC)}) self.assertEqual(tpl.render(ctx), "2011-09-01 at 20:20:20") with timezone.override(ICT): self.assertEqual(tpl.render(ctx), "2011-09-01 at 20:20:20") @requires_tz_support def test_now_template_tag_uses_current_time_zone(self): # Regression for #17343 tpl = Template("{% now \"O\" %}") self.assertEqual(tpl.render(Context({})), "+0300") with timezone.override(ICT): self.assertEqual(tpl.render(Context({})), "+0700") @override_settings(DATETIME_FORMAT='c', TIME_ZONE='Africa/Nairobi', USE_L10N=False, USE_TZ=False) class LegacyFormsTests(TestCase): def test_form(self): form = EventForm({'dt': '2011-09-01 13:20:30'}) self.assertTrue(form.is_valid()) self.assertEqual(form.cleaned_data['dt'], datetime.datetime(2011, 9, 1, 13, 20, 30)) def test_form_with_non_existent_time(self): form = EventForm({'dt': '2011-03-27 02:30:00'}) with timezone.override(pytz.timezone('Europe/Paris')): # this is obviously a bug self.assertTrue(form.is_valid()) self.assertEqual(form.cleaned_data['dt'], datetime.datetime(2011, 3, 27, 2, 30, 0)) def test_form_with_ambiguous_time(self): form = EventForm({'dt': '2011-10-30 02:30:00'}) with timezone.override(pytz.timezone('Europe/Paris')): # this is obviously a bug 
self.assertTrue(form.is_valid()) self.assertEqual(form.cleaned_data['dt'], datetime.datetime(2011, 10, 30, 2, 30, 0)) def test_split_form(self): form = EventSplitForm({'dt_0': '2011-09-01', 'dt_1': '13:20:30'}) self.assertTrue(form.is_valid()) self.assertEqual(form.cleaned_data['dt'], datetime.datetime(2011, 9, 1, 13, 20, 30)) def test_model_form(self): EventModelForm({'dt': '2011-09-01 13:20:30'}).save() e = Event.objects.get() self.assertEqual(e.dt, datetime.datetime(2011, 9, 1, 13, 20, 30)) @override_settings(DATETIME_FORMAT='c', TIME_ZONE='Africa/Nairobi', USE_L10N=False, USE_TZ=True) class NewFormsTests(TestCase): @requires_tz_support def test_form(self): form = EventForm({'dt': '2011-09-01 13:20:30'}) self.assertTrue(form.is_valid()) self.assertEqual(form.cleaned_data['dt'], datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC)) def test_form_with_other_timezone(self): form = EventForm({'dt': '2011-09-01 17:20:30'}) with timezone.override(ICT): self.assertTrue(form.is_valid()) self.assertEqual(form.cleaned_data['dt'], datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC)) def test_form_with_non_existent_time(self): with timezone.override(pytz.timezone('Europe/Paris')): form = EventForm({'dt': '2011-03-27 02:30:00'}) self.assertFalse(form.is_valid()) self.assertEqual( form.errors['dt'], [ '2011-03-27 02:30:00 couldn’t be interpreted in time zone ' 'Europe/Paris; it may be ambiguous or it may not exist.' ] ) def test_form_with_ambiguous_time(self): with timezone.override(pytz.timezone('Europe/Paris')): form = EventForm({'dt': '2011-10-30 02:30:00'}) self.assertFalse(form.is_valid()) self.assertEqual( form.errors['dt'], [ '2011-10-30 02:30:00 couldn’t be interpreted in time zone ' 'Europe/Paris; it may be ambiguous or it may not exist.' ] ) @requires_tz_support def test_split_form(self): form = EventSplitForm({'dt_0': '2011-09-01', 'dt_1': '13:20:30'}) self.assertTrue(form.is_valid()) self.assertEqual(form.cleaned_data['dt'], datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC)) @requires_tz_support def test_localized_form(self): form = EventLocalizedForm(initial={'dt': datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)}) with timezone.override(ICT): self.assertIn("2011-09-01 17:20:30", str(form)) @requires_tz_support def test_model_form(self): EventModelForm({'dt': '2011-09-01 13:20:30'}).save() e = Event.objects.get() self.assertEqual(e.dt, datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC)) @requires_tz_support def test_localized_model_form(self): form = EventLocalizedModelForm(instance=Event(dt=datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT))) with timezone.override(ICT): self.assertIn("2011-09-01 17:20:30", str(form)) @override_settings( DATETIME_FORMAT='c', TIME_ZONE='Africa/Nairobi', USE_L10N=False, USE_TZ=True, ROOT_URLCONF='timezones.urls', ) class AdminTests(TestCase): @classmethod def setUpTestData(cls): cls.u1 = User.objects.create_user( password='secret', last_login=datetime.datetime(2007, 5, 30, 13, 20, 10, tzinfo=UTC), is_superuser=True, username='super', first_name='Super', last_name='User', email='[email protected]', is_staff=True, is_active=True, date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10, tzinfo=UTC), ) def setUp(self): self.client.force_login(self.u1) @requires_tz_support def test_changelist(self): e = Event.objects.create(dt=datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC)) response = self.client.get(reverse('admin_tz:timezones_event_changelist')) self.assertContains(response, e.dt.astimezone(EAT).isoformat()) def 
test_changelist_in_other_timezone(self): e = Event.objects.create(dt=datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC)) with timezone.override(ICT): response = self.client.get(reverse('admin_tz:timezones_event_changelist')) self.assertContains(response, e.dt.astimezone(ICT).isoformat()) @requires_tz_support def test_change_editable(self): e = Event.objects.create(dt=datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC)) response = self.client.get(reverse('admin_tz:timezones_event_change', args=(e.pk,))) self.assertContains(response, e.dt.astimezone(EAT).date().isoformat()) self.assertContains(response, e.dt.astimezone(EAT).time().isoformat()) def test_change_editable_in_other_timezone(self): e = Event.objects.create(dt=datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC)) with timezone.override(ICT): response = self.client.get(reverse('admin_tz:timezones_event_change', args=(e.pk,))) self.assertContains(response, e.dt.astimezone(ICT).date().isoformat()) self.assertContains(response, e.dt.astimezone(ICT).time().isoformat()) @requires_tz_support def test_change_readonly(self): Timestamp.objects.create() # re-fetch the object for backends that lose microseconds (MySQL) t = Timestamp.objects.get() response = self.client.get(reverse('admin_tz:timezones_timestamp_change', args=(t.pk,))) self.assertContains(response, t.created.astimezone(EAT).isoformat()) def test_change_readonly_in_other_timezone(self): Timestamp.objects.create() # re-fetch the object for backends that lose microseconds (MySQL) t = Timestamp.objects.get() with timezone.override(ICT): response = self.client.get(reverse('admin_tz:timezones_timestamp_change', args=(t.pk,))) self.assertContains(response, t.created.astimezone(ICT).isoformat())
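# Illustrative sketch, not part of the original suite: a minimal demo of the
# conversion rule the tests above keep exercising. With USE_TZ=True, Django
# stores aware datetimes and timezone.override() only changes the zone that
# localtime() converts to for display. The function name is hypothetical;
# UTC, EAT, and ICT are the fixed-offset zones defined at the top of this
# module.
def _demo_timezone_override():
    dt = datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC)
    with timezone.override(EAT):
        # Same instant, rendered as 2011-09-01 13:20:30+03:00.
        assert timezone.localtime(dt) == dt.astimezone(EAT)
    with timezone.override(ICT):
        # Same instant, rendered as 2011-09-01 17:20:30+07:00.
        assert timezone.localtime(dt) == dt.astimezone(ICT)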
import datetime import decimal import ipaddress import uuid from django.db import models from django.template import Context, Template from django.test import SimpleTestCase from django.utils.functional import Promise from django.utils.translation import gettext_lazy as _ class Suit(models.IntegerChoices): DIAMOND = 1, _('Diamond') SPADE = 2, _('Spade') HEART = 3, _('Heart') CLUB = 4, _('Club') class YearInSchool(models.TextChoices): FRESHMAN = 'FR', _('Freshman') SOPHOMORE = 'SO', _('Sophomore') JUNIOR = 'JR', _('Junior') SENIOR = 'SR', _('Senior') GRADUATE = 'GR', _('Graduate') class Vehicle(models.IntegerChoices): CAR = 1, 'Carriage' TRUCK = 2 JET_SKI = 3 __empty__ = _('(Unknown)') class Gender(models.TextChoices): MALE = 'M' FEMALE = 'F' NOT_SPECIFIED = 'X' __empty__ = '(Undeclared)' class ChoicesTests(SimpleTestCase): def test_integerchoices(self): self.assertEqual(Suit.choices, [(1, 'Diamond'), (2, 'Spade'), (3, 'Heart'), (4, 'Club')]) self.assertEqual(Suit.labels, ['Diamond', 'Spade', 'Heart', 'Club']) self.assertEqual(Suit.values, [1, 2, 3, 4]) self.assertEqual(Suit.names, ['DIAMOND', 'SPADE', 'HEART', 'CLUB']) self.assertEqual(repr(Suit.DIAMOND), '<Suit.DIAMOND: 1>') self.assertEqual(Suit.DIAMOND.label, 'Diamond') self.assertEqual(Suit.DIAMOND.value, 1) self.assertEqual(Suit['DIAMOND'], Suit.DIAMOND) self.assertEqual(Suit(1), Suit.DIAMOND) self.assertIsInstance(Suit, type(models.Choices)) self.assertIsInstance(Suit.DIAMOND, Suit) self.assertIsInstance(Suit.DIAMOND.label, Promise) self.assertIsInstance(Suit.DIAMOND.value, int) def test_integerchoices_auto_label(self): self.assertEqual(Vehicle.CAR.label, 'Carriage') self.assertEqual(Vehicle.TRUCK.label, 'Truck') self.assertEqual(Vehicle.JET_SKI.label, 'Jet Ski') def test_integerchoices_empty_label(self): self.assertEqual(Vehicle.choices[0], (None, '(Unknown)')) self.assertEqual(Vehicle.labels[0], '(Unknown)') self.assertIsNone(Vehicle.values[0]) self.assertEqual(Vehicle.names[0], '__empty__') def test_integerchoices_functional_api(self): Place = models.IntegerChoices('Place', 'FIRST SECOND THIRD') self.assertEqual(Place.labels, ['First', 'Second', 'Third']) self.assertEqual(Place.values, [1, 2, 3]) self.assertEqual(Place.names, ['FIRST', 'SECOND', 'THIRD']) def test_integerchoices_containment(self): self.assertIn(Suit.DIAMOND, Suit) self.assertIn(1, Suit) self.assertNotIn(0, Suit) def test_textchoices(self): self.assertEqual(YearInSchool.choices, [ ('FR', 'Freshman'), ('SO', 'Sophomore'), ('JR', 'Junior'), ('SR', 'Senior'), ('GR', 'Graduate'), ]) self.assertEqual(YearInSchool.labels, ['Freshman', 'Sophomore', 'Junior', 'Senior', 'Graduate']) self.assertEqual(YearInSchool.values, ['FR', 'SO', 'JR', 'SR', 'GR']) self.assertEqual(YearInSchool.names, ['FRESHMAN', 'SOPHOMORE', 'JUNIOR', 'SENIOR', 'GRADUATE']) self.assertEqual(repr(YearInSchool.FRESHMAN), "<YearInSchool.FRESHMAN: 'FR'>") self.assertEqual(YearInSchool.FRESHMAN.label, 'Freshman') self.assertEqual(YearInSchool.FRESHMAN.value, 'FR') self.assertEqual(YearInSchool['FRESHMAN'], YearInSchool.FRESHMAN) self.assertEqual(YearInSchool('FR'), YearInSchool.FRESHMAN) self.assertIsInstance(YearInSchool, type(models.Choices)) self.assertIsInstance(YearInSchool.FRESHMAN, YearInSchool) self.assertIsInstance(YearInSchool.FRESHMAN.label, Promise) self.assertIsInstance(YearInSchool.FRESHMAN.value, str) def test_textchoices_auto_label(self): self.assertEqual(Gender.MALE.label, 'Male') self.assertEqual(Gender.FEMALE.label, 'Female') self.assertEqual(Gender.NOT_SPECIFIED.label, 'Not Specified') 
    def test_textchoices_empty_label(self):
        self.assertEqual(Gender.choices[0], (None, '(Undeclared)'))
        self.assertEqual(Gender.labels[0], '(Undeclared)')
        self.assertIsNone(Gender.values[0])
        self.assertEqual(Gender.names[0], '__empty__')

    def test_textchoices_functional_api(self):
        Medal = models.TextChoices('Medal', 'GOLD SILVER BRONZE')
        self.assertEqual(Medal.labels, ['Gold', 'Silver', 'Bronze'])
        self.assertEqual(Medal.values, ['GOLD', 'SILVER', 'BRONZE'])
        self.assertEqual(Medal.names, ['GOLD', 'SILVER', 'BRONZE'])

    def test_textchoices_containment(self):
        self.assertIn(YearInSchool.FRESHMAN, YearInSchool)
        self.assertIn('FR', YearInSchool)
        self.assertNotIn('XX', YearInSchool)

    def test_textchoices_blank_value(self):
        class BlankStr(models.TextChoices):
            EMPTY = '', '(Empty)'
            ONE = 'ONE', 'One'

        self.assertEqual(BlankStr.labels, ['(Empty)', 'One'])
        self.assertEqual(BlankStr.values, ['', 'ONE'])
        self.assertEqual(BlankStr.names, ['EMPTY', 'ONE'])

    def test_invalid_definition(self):
        msg = "'str' object cannot be interpreted as an integer"
        with self.assertRaisesMessage(TypeError, msg):
            class InvalidArgumentEnum(models.IntegerChoices):
                # A string is not permitted as the second argument to int().
                ONE = 1, 'X', 'Invalid'

        msg = "duplicate values found in <enum 'Fruit'>: PINEAPPLE -> APPLE"
        with self.assertRaisesMessage(ValueError, msg):
            class Fruit(models.IntegerChoices):
                APPLE = 1, 'Apple'
                PINEAPPLE = 1, 'Pineapple'

    def test_str(self):
        for test in [Gender, Suit, YearInSchool, Vehicle]:
            for member in test:
                with self.subTest(member=member):
                    self.assertEqual(str(test[member.name]), str(member.value))

    def test_templates(self):
        template = Template('{{ Suit.DIAMOND.label }}|{{ Suit.DIAMOND.value }}')
        output = template.render(Context({'Suit': Suit}))
        self.assertEqual(output, 'Diamond|1')


class Separator(bytes, models.Choices):
    FS = b'\x1c', 'File Separator'
    GS = b'\x1d', 'Group Separator'
    RS = b'\x1e', 'Record Separator'
    US = b'\x1f', 'Unit Separator'


class Constants(float, models.Choices):
    PI = 3.141592653589793, 'π'
    TAU = 6.283185307179586, 'τ'


class Set(frozenset, models.Choices):
    A = {1, 2}
    B = {2, 3}
    UNION = A | B
    DIFFERENCE = A - B
    INTERSECTION = A & B


class MoonLandings(datetime.date, models.Choices):
    APOLLO_11 = 1969, 7, 20, 'Apollo 11 (Eagle)'
    APOLLO_12 = 1969, 11, 19, 'Apollo 12 (Intrepid)'
    APOLLO_14 = 1971, 2, 5, 'Apollo 14 (Antares)'
    APOLLO_15 = 1971, 7, 30, 'Apollo 15 (Falcon)'
    APOLLO_16 = 1972, 4, 21, 'Apollo 16 (Orion)'
    APOLLO_17 = 1972, 12, 11, 'Apollo 17 (Challenger)'


class DateAndTime(datetime.datetime, models.Choices):
    A = 2010, 10, 10, 10, 10, 10
    B = 2011, 11, 11, 11, 11, 11
    C = 2012, 12, 12, 12, 12, 12


class MealTimes(datetime.time, models.Choices):
    BREAKFAST = 7, 0
    LUNCH = 13, 0
    DINNER = 18, 30


class Frequency(datetime.timedelta, models.Choices):
    # timedelta's positional args: days, seconds, microseconds, milliseconds,
    # minutes, hours, weeks.
    WEEK = 0, 0, 0, 0, 0, 0, 1, 'Week'
    DAY = 1, 'Day'
    HOUR = 0, 0, 0, 0, 0, 1, 'Hour'
    MINUTE = 0, 0, 0, 0, 1, 'Minute'
    SECOND = 0, 1, 'Second'


class Number(decimal.Decimal, models.Choices):
    E = 2.718281828459045, 'e'
    PI = '3.141592653589793', 'π'
    TAU = decimal.Decimal('6.283185307179586'), 'τ'


class IPv4Address(ipaddress.IPv4Address, models.Choices):
    LOCALHOST = '127.0.0.1', 'Localhost'
    GATEWAY = '192.168.0.1', 'Gateway'
    BROADCAST = '192.168.0.255', 'Broadcast'


class IPv6Address(ipaddress.IPv6Address, models.Choices):
    LOCALHOST = '::1', 'Localhost'
    UNSPECIFIED = '::', 'Unspecified'


class IPv4Network(ipaddress.IPv4Network, models.Choices):
    LOOPBACK = '127.0.0.0/8', 'Loopback'
    LINK_LOCAL = '169.254.0.0/16', 'Link-Local'
    PRIVATE_USE_A = '10.0.0.0/8',
'Private-Use (Class A)' class IPv6Network(ipaddress.IPv6Network, models.Choices): LOOPBACK = '::1/128', 'Loopback' UNSPECIFIED = '::/128', 'Unspecified' UNIQUE_LOCAL = 'fc00::/7', 'Unique-Local' LINK_LOCAL_UNICAST = 'fe80::/10', 'Link-Local Unicast' class CustomChoicesTests(SimpleTestCase): def test_labels_valid(self): enums = ( Separator, Constants, Set, MoonLandings, DateAndTime, MealTimes, Frequency, Number, IPv4Address, IPv6Address, IPv4Network, IPv6Network, ) for choice_enum in enums: with self.subTest(choice_enum.__name__): self.assertNotIn(None, choice_enum.labels) def test_bool_unsupported(self): msg = "type 'bool' is not an acceptable base type" with self.assertRaisesMessage(TypeError, msg): class Boolean(bool, models.Choices): pass def test_timezone_unsupported(self): msg = "type 'datetime.timezone' is not an acceptable base type" with self.assertRaisesMessage(TypeError, msg): class Timezone(datetime.timezone, models.Choices): pass def test_uuid_unsupported(self): msg = 'UUID objects are immutable' with self.assertRaisesMessage(TypeError, msg): class Identifier(uuid.UUID, models.Choices): A = '972ce4eb-a95f-4a56-9339-68c208a76f18'
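# Illustrative sketch, not from the original file: how an enum like
# YearInSchool above is typically wired to a model field. The model name,
# app_label, and helper method are assumptions for illustration only.
class _Student(models.Model):
    year_in_school = models.CharField(
        max_length=2,
        choices=YearInSchool.choices,
        default=YearInSchool.FRESHMAN,
    )

    class Meta:
        app_label = 'model_enums'  # assumed label so the sketch is self-contained

    def is_upperclass(self):
        # TextChoices members subclass str, so they compare (and hash) equal
        # to the stored database value.
        return self.year_in_school in {YearInSchool.JUNIOR, YearInSchool.SENIOR}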
from django.forms import FileInput

from .base import WidgetTest


class FileInputTest(WidgetTest):
    widget = FileInput()

    def test_render(self):
        """
        FileInput widgets never render the value attribute. The old value
        isn't useful if a form is updated or an error occurred.
        """
        self.check_html(self.widget, 'email', '[email protected]', html='<input type="file" name="email">')
        self.check_html(self.widget, 'email', '', html='<input type="file" name="email">')
        self.check_html(self.widget, 'email', None, html='<input type="file" name="email">')

    def test_value_omitted_from_data(self):
        self.assertIs(self.widget.value_omitted_from_data({}, {}, 'field'), True)
        self.assertIs(self.widget.value_omitted_from_data({}, {'field': 'value'}, 'field'), False)

    def test_use_required_attribute(self):
        # False when initial data exists. The file input is left blank by the
        # user to keep the existing, initial value.
        self.assertIs(self.widget.use_required_attribute(None), True)
        self.assertIs(self.widget.use_required_attribute('resume.txt'), False)
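# Illustrative sketch, not part of the original tests: value_omitted_from_data()
# takes (data, files, name) and only consults the files dict, so POST data
# under the same name is ignored. The dict contents below are hypothetical.
def _demo_value_omitted_from_data():
    widget = FileInput()
    # A value in POST data alone doesn't count: uploads live in request.FILES.
    assert widget.value_omitted_from_data({'field': 'posted text'}, {}, 'field') is True
    # An entry in the files dict does count as a submitted value.
    assert widget.value_omitted_from_data({}, {'field': 'an upload'}, 'field') is False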
from datetime import date, datetime from django.forms import DateTimeField, ValidationError from django.test import SimpleTestCase from django.utils.timezone import get_fixed_timezone, utc class DateTimeFieldTest(SimpleTestCase): def test_datetimefield_clean(self): tests = [ (date(2006, 10, 25), datetime(2006, 10, 25, 0, 0)), (datetime(2006, 10, 25, 14, 30), datetime(2006, 10, 25, 14, 30)), (datetime(2006, 10, 25, 14, 30, 59), datetime(2006, 10, 25, 14, 30, 59)), ( datetime(2006, 10, 25, 14, 30, 59, 200), datetime(2006, 10, 25, 14, 30, 59, 200), ), ('2006-10-25 14:30:45.000200', datetime(2006, 10, 25, 14, 30, 45, 200)), ('2006-10-25 14:30:45.0002', datetime(2006, 10, 25, 14, 30, 45, 200)), ('2006-10-25 14:30:45', datetime(2006, 10, 25, 14, 30, 45)), ('2006-10-25 14:30:00', datetime(2006, 10, 25, 14, 30)), ('2006-10-25 14:30', datetime(2006, 10, 25, 14, 30)), ('2006-10-25', datetime(2006, 10, 25, 0, 0)), ('10/25/2006 14:30:45.000200', datetime(2006, 10, 25, 14, 30, 45, 200)), ('10/25/2006 14:30:45', datetime(2006, 10, 25, 14, 30, 45)), ('10/25/2006 14:30:00', datetime(2006, 10, 25, 14, 30)), ('10/25/2006 14:30', datetime(2006, 10, 25, 14, 30)), ('10/25/2006', datetime(2006, 10, 25, 0, 0)), ('10/25/06 14:30:45.000200', datetime(2006, 10, 25, 14, 30, 45, 200)), ('10/25/06 14:30:45', datetime(2006, 10, 25, 14, 30, 45)), ('10/25/06 14:30:00', datetime(2006, 10, 25, 14, 30)), ('10/25/06 14:30', datetime(2006, 10, 25, 14, 30)), ('10/25/06', datetime(2006, 10, 25, 0, 0)), # ISO 8601 formats. ( '2014-09-23T22:34:41.614804', datetime(2014, 9, 23, 22, 34, 41, 614804), ), ('2014-09-23T22:34:41', datetime(2014, 9, 23, 22, 34, 41)), ('2014-09-23T22:34', datetime(2014, 9, 23, 22, 34)), ('2014-09-23', datetime(2014, 9, 23, 0, 0)), ('2014-09-23T22:34Z', datetime(2014, 9, 23, 22, 34, tzinfo=utc)), ( '2014-09-23T22:34+07:00', datetime(2014, 9, 23, 22, 34, tzinfo=get_fixed_timezone(420)), ), # Whitespace stripping. 
(' 2006-10-25 14:30:45 ', datetime(2006, 10, 25, 14, 30, 45)), (' 2006-10-25 ', datetime(2006, 10, 25, 0, 0)), (' 10/25/2006 14:30:45 ', datetime(2006, 10, 25, 14, 30, 45)), (' 10/25/2006 14:30 ', datetime(2006, 10, 25, 14, 30)), (' 10/25/2006 ', datetime(2006, 10, 25, 0, 0)), (' 10/25/06 14:30:45 ', datetime(2006, 10, 25, 14, 30, 45)), (' 10/25/06 ', datetime(2006, 10, 25, 0, 0)), ( ' 2014-09-23T22:34:41.614804 ', datetime(2014, 9, 23, 22, 34, 41, 614804), ), (' 2014-09-23T22:34Z ', datetime(2014, 9, 23, 22, 34, tzinfo=utc)), ] f = DateTimeField() for value, expected_datetime in tests: with self.subTest(value=value): self.assertEqual(f.clean(value), expected_datetime) def test_datetimefield_clean_invalid(self): f = DateTimeField() msg = "'Enter a valid date/time.'" with self.assertRaisesMessage(ValidationError, msg): f.clean('hello') with self.assertRaisesMessage(ValidationError, msg): f.clean('2006-10-25 4:30 p.m.') with self.assertRaisesMessage(ValidationError, msg): f.clean(' ') with self.assertRaisesMessage(ValidationError, msg): f.clean('2014-09-23T28:23') f = DateTimeField(input_formats=['%Y %m %d %I:%M %p']) with self.assertRaisesMessage(ValidationError, msg): f.clean('2006.10.25 14:30:45') def test_datetimefield_clean_input_formats(self): tests = [ ('%Y %m %d %I:%M %p', ( (date(2006, 10, 25), datetime(2006, 10, 25, 0, 0)), (datetime(2006, 10, 25, 14, 30), datetime(2006, 10, 25, 14, 30)), ( datetime(2006, 10, 25, 14, 30, 59), datetime(2006, 10, 25, 14, 30, 59), ), ( datetime(2006, 10, 25, 14, 30, 59, 200), datetime(2006, 10, 25, 14, 30, 59, 200), ), ('2006 10 25 2:30 PM', datetime(2006, 10, 25, 14, 30)), # ISO-like formats are always accepted. ('2006-10-25 14:30:45', datetime(2006, 10, 25, 14, 30, 45)), )), ('%Y.%m.%d %H:%M:%S.%f', ( ( '2006.10.25 14:30:45.0002', datetime(2006, 10, 25, 14, 30, 45, 200), ), )), ] f = DateTimeField() for input_format, values in tests: f = DateTimeField(input_formats=[input_format]) for value, expected_datetime in values: with self.subTest(value=value, input_format=input_format): self.assertEqual(f.clean(value), expected_datetime) def test_datetimefield_not_required(self): f = DateTimeField(required=False) self.assertIsNone(f.clean(None)) self.assertEqual('None', repr(f.clean(None))) self.assertIsNone(f.clean('')) self.assertEqual('None', repr(f.clean(''))) def test_datetimefield_changed(self): f = DateTimeField(input_formats=['%Y %m %d %I:%M %p']) d = datetime(2006, 9, 17, 14, 30, 0) self.assertFalse(f.has_changed(d, '2006 09 17 2:30 PM'))
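# Illustrative sketch, not part of the original tests: passing input_formats
# replaces the default formats, while ISO-like values remain accepted, as
# the tests above verify. The format string and function name are arbitrary
# examples.
def _demo_custom_input_formats():
    f = DateTimeField(input_formats=['%d.%m.%Y %H:%M'])
    # Parsed with the custom format.
    assert f.clean('25.10.2006 14:30') == datetime(2006, 10, 25, 14, 30)
    # ISO-like input still parses even though it isn't in input_formats.
    assert f.clean('2006-10-25 14:30:45') == datetime(2006, 10, 25, 14, 30, 45)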
from datetime import date, datetime, time

from django import forms
from django.test import SimpleTestCase, override_settings
from django.utils.translation import activate, deactivate


@override_settings(TIME_INPUT_FORMATS=["%I:%M:%S %p", "%I:%M %p"], USE_L10N=True)
class LocalizedTimeTests(SimpleTestCase):
    def setUp(self):
        # nl/formats.py has customized TIME_INPUT_FORMATS:
        # ['%H:%M:%S', '%H.%M:%S', '%H.%M', '%H:%M']
        activate('nl')

    def tearDown(self):
        deactivate()

    def test_timeField(self):
        "TimeFields can parse times in the default format"
        f = forms.TimeField()
        # Parse a time in an unaccepted format; get an error
        with self.assertRaises(forms.ValidationError):
            f.clean('1:30:05 PM')

        # Parse a time in a valid format, get a parsed result
        result = f.clean('13:30:05')
        self.assertEqual(result, time(13, 30, 5))

        # The parsed result does a round trip
        text = f.widget.format_value(result)
        self.assertEqual(text, '13:30:05')

        # Parse a time in a valid, but non-default format, get a parsed result
        result = f.clean('13:30')
        self.assertEqual(result, time(13, 30, 0))

        # The parsed result does a round trip to default format
        text = f.widget.format_value(result)
        self.assertEqual(text, "13:30:00")

        # ISO formats are accepted, even if not specified in formats.py
        result = f.clean('13:30:05.000155')
        self.assertEqual(result, time(13, 30, 5, 155))

    def test_localized_timeField(self):
        "Localized TimeFields act as unlocalized widgets"
        f = forms.TimeField(localize=True)
        # Parse a time in an unaccepted format; get an error
        with self.assertRaises(forms.ValidationError):
            f.clean('1:30:05 PM')

        # Parse a time in a valid format, get a parsed result
        result = f.clean('13:30:05')
        self.assertEqual(result, time(13, 30, 5))

        # The parsed result does a round trip to the same format
        text = f.widget.format_value(result)
        self.assertEqual(text, '13:30:05')

        # Parse a time in a valid format, get a parsed result
        result = f.clean('13:30')
        self.assertEqual(result, time(13, 30, 0))

        # The parsed result does a round trip to default format
        text = f.widget.format_value(result)
        self.assertEqual(text, "13:30:00")

    def test_timeField_with_inputformat(self):
        "TimeFields with manually specified input formats can accept those formats"
        f = forms.TimeField(input_formats=["%H.%M.%S", "%H.%M"])
        # Parse a time in an unaccepted format; get an error
        with self.assertRaises(forms.ValidationError):
            f.clean('1:30:05 PM')
        with self.assertRaises(forms.ValidationError):
            f.clean('13:30:05')

        # Parse a time in a valid format, get a parsed result
        result = f.clean('13.30.05')
        self.assertEqual(result, time(13, 30, 5))

        # The parsed result does a round trip to the same format
        text = f.widget.format_value(result)
        self.assertEqual(text, "13:30:05")

        # Parse a time in a valid format, get a parsed result
        result = f.clean('13.30')
        self.assertEqual(result, time(13, 30, 0))

        # The parsed result does a round trip to default format
        text = f.widget.format_value(result)
        self.assertEqual(text, "13:30:00")

    def test_localized_timeField_with_inputformat(self):
        "Localized TimeFields with manually specified input formats can accept those formats"
        f = forms.TimeField(input_formats=["%H.%M.%S", "%H.%M"], localize=True)
        # Parse a time in an unaccepted format; get an error
        with self.assertRaises(forms.ValidationError):
            f.clean('1:30:05 PM')
        with self.assertRaises(forms.ValidationError):
            f.clean('13:30:05')

        # Parse a time in a valid format, get a parsed result
        result = f.clean('13.30.05')
        self.assertEqual(result, time(13, 30, 5))

        # The parsed result does a round trip to the same format
        text = f.widget.format_value(result)
        self.assertEqual(text, "13:30:05")

        # Parse a time in a valid format, get a parsed result
        result = f.clean('13.30')
        self.assertEqual(result, time(13, 30, 0))

        # The parsed result does a round trip to default format
        text = f.widget.format_value(result)
        self.assertEqual(text, "13:30:00")


@override_settings(TIME_INPUT_FORMATS=["%I:%M:%S %p", "%I:%M %p"])
class CustomTimeInputFormatsTests(SimpleTestCase):
    def test_timeField(self):
        "TimeFields can parse times in the default format"
        f = forms.TimeField()
        # Parse a time in an unaccepted format; get an error
        with self.assertRaises(forms.ValidationError):
            f.clean('13:30:05')

        # Parse a time in a valid format, get a parsed result
        result = f.clean('1:30:05 PM')
        self.assertEqual(result, time(13, 30, 5))

        # The parsed result does a round trip
        text = f.widget.format_value(result)
        self.assertEqual(text, '01:30:05 PM')

        # Parse a time in a valid, but non-default format, get a parsed result
        result = f.clean('1:30 PM')
        self.assertEqual(result, time(13, 30, 0))

        # The parsed result does a round trip to default format
        text = f.widget.format_value(result)
        self.assertEqual(text, "01:30:00 PM")

    def test_localized_timeField(self):
        "Localized TimeFields act as unlocalized widgets"
        f = forms.TimeField(localize=True)
        # Parse a time in an unaccepted format; get an error
        with self.assertRaises(forms.ValidationError):
            f.clean('13:30:05')

        # Parse a time in a valid format, get a parsed result
        result = f.clean('1:30:05 PM')
        self.assertEqual(result, time(13, 30, 5))

        # The parsed result does a round trip to the same format
        text = f.widget.format_value(result)
        self.assertEqual(text, '01:30:05 PM')

        # Parse a time in a valid format, get a parsed result
        result = f.clean('01:30 PM')
        self.assertEqual(result, time(13, 30, 0))

        # The parsed result does a round trip to default format
        text = f.widget.format_value(result)
        self.assertEqual(text, "01:30:00 PM")

    def test_timeField_with_inputformat(self):
        "TimeFields with manually specified input formats can accept those formats"
        f = forms.TimeField(input_formats=["%H.%M.%S", "%H.%M"])
        # Parse a time in an unaccepted format; get an error
        with self.assertRaises(forms.ValidationError):
            f.clean('1:30:05 PM')
        with self.assertRaises(forms.ValidationError):
            f.clean('13:30:05')

        # Parse a time in a valid format, get a parsed result
        result = f.clean('13.30.05')
        self.assertEqual(result, time(13, 30, 5))

        # The parsed result does a round trip to the same format
        text = f.widget.format_value(result)
        self.assertEqual(text, "01:30:05 PM")

        # Parse a time in a valid format, get a parsed result
        result = f.clean('13.30')
        self.assertEqual(result, time(13, 30, 0))

        # The parsed result does a round trip to default format
        text = f.widget.format_value(result)
        self.assertEqual(text, "01:30:00 PM")

    def test_localized_timeField_with_inputformat(self):
        "Localized TimeFields with manually specified input formats can accept those formats"
        f = forms.TimeField(input_formats=["%H.%M.%S", "%H.%M"], localize=True)
        # Parse a time in an unaccepted format; get an error
        with self.assertRaises(forms.ValidationError):
            f.clean('1:30:05 PM')
        with self.assertRaises(forms.ValidationError):
            f.clean('13:30:05')

        # Parse a time in a valid format, get a parsed result
        result = f.clean('13.30.05')
        self.assertEqual(result, time(13, 30, 5))

        # The parsed result does a round trip to the same format
        text = f.widget.format_value(result)
        self.assertEqual(text, "01:30:05 PM")

        # Parse a time in a valid format, get a parsed result
        result = f.clean('13.30')
        self.assertEqual(result, time(13, 30, 0))

        # The parsed result does a round trip to default format
        text = f.widget.format_value(result)
        self.assertEqual(text, "01:30:00 PM")


class SimpleTimeFormatTests(SimpleTestCase):
    def test_timeField(self):
        "TimeFields can parse times in the default format"
        f = forms.TimeField()
        # Parse a time in an unaccepted format; get an error
        with self.assertRaises(forms.ValidationError):
            f.clean('1:30:05 PM')

        # Parse a time in a valid format, get a parsed result
        result = f.clean('13:30:05')
        self.assertEqual(result, time(13, 30, 5))

        # The parsed result does a round trip to the same format
        text = f.widget.format_value(result)
        self.assertEqual(text, "13:30:05")

        # Parse a time in a valid, but non-default format, get a parsed result
        result = f.clean('13:30')
        self.assertEqual(result, time(13, 30, 0))

        # The parsed result does a round trip to default format
        text = f.widget.format_value(result)
        self.assertEqual(text, "13:30:00")

    def test_localized_timeField(self):
        "Localized TimeFields in a non-localized environment act as unlocalized widgets"
        f = forms.TimeField()
        # Parse a time in an unaccepted format; get an error
        with self.assertRaises(forms.ValidationError):
            f.clean('1:30:05 PM')

        # Parse a time in a valid format, get a parsed result
        result = f.clean('13:30:05')
        self.assertEqual(result, time(13, 30, 5))

        # The parsed result does a round trip to the same format
        text = f.widget.format_value(result)
        self.assertEqual(text, "13:30:05")

        # Parse a time in a valid format, get a parsed result
        result = f.clean('13:30')
        self.assertEqual(result, time(13, 30, 0))

        # The parsed result does a round trip to default format
        text = f.widget.format_value(result)
        self.assertEqual(text, "13:30:00")

    def test_timeField_with_inputformat(self):
        "TimeFields with manually specified input formats can accept those formats"
        f = forms.TimeField(input_formats=["%I:%M:%S %p", "%I:%M %p"])
        # Parse a time in an unaccepted format; get an error
        with self.assertRaises(forms.ValidationError):
            f.clean('13:30:05')

        # Parse a time in a valid format, get a parsed result
        result = f.clean('1:30:05 PM')
        self.assertEqual(result, time(13, 30, 5))

        # The parsed result does a round trip to the same format
        text = f.widget.format_value(result)
        self.assertEqual(text, "13:30:05")

        # Parse a time in a valid format, get a parsed result
        result = f.clean('1:30 PM')
        self.assertEqual(result, time(13, 30, 0))

        # The parsed result does a round trip to default format
        text = f.widget.format_value(result)
        self.assertEqual(text, "13:30:00")

    def test_localized_timeField_with_inputformat(self):
        "Localized TimeFields with manually specified input formats can accept those formats"
        f = forms.TimeField(input_formats=["%I:%M:%S %p", "%I:%M %p"], localize=True)
        # Parse a time in an unaccepted format; get an error
        with self.assertRaises(forms.ValidationError):
            f.clean('13:30:05')

        # Parse a time in a valid format, get a parsed result
        result = f.clean('1:30:05 PM')
        self.assertEqual(result, time(13, 30, 5))

        # The parsed result does a round trip to the same format
        text = f.widget.format_value(result)
        self.assertEqual(text, "13:30:05")

        # Parse a time in a valid format, get a parsed result
        result = f.clean('1:30 PM')
        self.assertEqual(result, time(13, 30, 0))

        # The parsed result does a round trip to default format
        text = f.widget.format_value(result)
        self.assertEqual(text, "13:30:00")
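# Editor's note -- illustrative sketch, not part of the original test module.
# The classes above all exercise the same contract: TimeField tries each entry
# of its input_formats (or the active TIME_INPUT_FORMATS) in order and raises
# ValidationError when none of them matches. Assuming a configured Django
# settings module, the behaviour can be reproduced standalone:
#
#   from django import forms
#
#   f = forms.TimeField(input_formats=["%H.%M.%S", "%H.%M"])
#   f.clean("13.30")      # datetime.time(13, 30) -- second format matches
#   f.clean("1:30 PM")    # raises forms.ValidationError -- no format matches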
@override_settings(DATE_INPUT_FORMATS=["%d/%m/%Y", "%d-%m-%Y"], USE_L10N=True)
class LocalizedDateTests(SimpleTestCase):
    def setUp(self):
        activate('de')

    def tearDown(self):
        deactivate()

    def test_dateField(self):
        "DateFields can parse dates in the default format"
        f = forms.DateField()
        # Parse a date in an unaccepted format; get an error
        with self.assertRaises(forms.ValidationError):
            f.clean('21/12/2010')

        # ISO formats are accepted, even if not specified in formats.py
        self.assertEqual(f.clean('2010-12-21'), date(2010, 12, 21))

        # Parse a date in a valid format, get a parsed result
        result = f.clean('21.12.2010')
        self.assertEqual(result, date(2010, 12, 21))

        # The parsed result does a round trip
        text = f.widget.format_value(result)
        self.assertEqual(text, '21.12.2010')

        # Parse a date in a valid, but non-default format, get a parsed result
        result = f.clean('21.12.10')
        self.assertEqual(result, date(2010, 12, 21))

        # The parsed result does a round trip to default format
        text = f.widget.format_value(result)
        self.assertEqual(text, "21.12.2010")

    def test_localized_dateField(self):
        "Localized DateFields act as unlocalized widgets"
        f = forms.DateField(localize=True)
        # Parse a date in an unaccepted format; get an error
        with self.assertRaises(forms.ValidationError):
            f.clean('21/12/2010')

        # Parse a date in a valid format, get a parsed result
        result = f.clean('21.12.2010')
        self.assertEqual(result, date(2010, 12, 21))

        # The parsed result does a round trip to the same format
        text = f.widget.format_value(result)
        self.assertEqual(text, '21.12.2010')

        # Parse a date in a valid format, get a parsed result
        result = f.clean('21.12.10')
        self.assertEqual(result, date(2010, 12, 21))

        # The parsed result does a round trip to default format
        text = f.widget.format_value(result)
        self.assertEqual(text, "21.12.2010")

    def test_dateField_with_inputformat(self):
        "DateFields with manually specified input formats can accept those formats"
        f = forms.DateField(input_formats=["%m.%d.%Y", "%m-%d-%Y"])
        # Parse a date in an unaccepted format; get an error
        with self.assertRaises(forms.ValidationError):
            f.clean('2010-12-21')
        with self.assertRaises(forms.ValidationError):
            f.clean('21/12/2010')
        with self.assertRaises(forms.ValidationError):
            f.clean('21.12.2010')

        # Parse a date in a valid format, get a parsed result
        result = f.clean('12.21.2010')
        self.assertEqual(result, date(2010, 12, 21))

        # The parsed result does a round trip to the same format
        text = f.widget.format_value(result)
        self.assertEqual(text, "21.12.2010")

        # Parse a date in a valid format, get a parsed result
        result = f.clean('12-21-2010')
        self.assertEqual(result, date(2010, 12, 21))

        # The parsed result does a round trip to default format
        text = f.widget.format_value(result)
        self.assertEqual(text, "21.12.2010")

    def test_localized_dateField_with_inputformat(self):
        "Localized DateFields with manually specified input formats can accept those formats"
        f = forms.DateField(input_formats=["%m.%d.%Y", "%m-%d-%Y"], localize=True)
        # Parse a date in an unaccepted format; get an error
        with self.assertRaises(forms.ValidationError):
            f.clean('2010-12-21')
        with self.assertRaises(forms.ValidationError):
            f.clean('21/12/2010')
        with self.assertRaises(forms.ValidationError):
            f.clean('21.12.2010')

        # Parse a date in a valid format, get a parsed result
        result = f.clean('12.21.2010')
        self.assertEqual(result, date(2010, 12, 21))

        # The parsed result does a round trip to the same format
        text = f.widget.format_value(result)
        self.assertEqual(text, "21.12.2010")

        # Parse a date in a valid format, get a parsed result
        result = f.clean('12-21-2010')
        self.assertEqual(result, date(2010, 12, 21))

        # The parsed result does a round trip to default format
        text = f.widget.format_value(result)
        self.assertEqual(text, "21.12.2010")


@override_settings(DATE_INPUT_FORMATS=["%d.%m.%Y", "%d-%m-%Y"])
class CustomDateInputFormatsTests(SimpleTestCase):
    def test_dateField(self):
        "DateFields can parse dates in the default format"
        f = forms.DateField()
        # Parse a date in an unaccepted format; get an error
        with self.assertRaises(forms.ValidationError):
            f.clean('2010-12-21')

        # Parse a date in a valid format, get a parsed result
        result = f.clean('21.12.2010')
        self.assertEqual(result, date(2010, 12, 21))

        # The parsed result does a round trip
        text = f.widget.format_value(result)
        self.assertEqual(text, '21.12.2010')

        # Parse a date in a valid, but non-default format, get a parsed result
        result = f.clean('21-12-2010')
        self.assertEqual(result, date(2010, 12, 21))

        # The parsed result does a round trip to default format
        text = f.widget.format_value(result)
        self.assertEqual(text, "21.12.2010")

    def test_localized_dateField(self):
        "Localized DateFields act as unlocalized widgets"
        f = forms.DateField(localize=True)
        # Parse a date in an unaccepted format; get an error
        with self.assertRaises(forms.ValidationError):
            f.clean('2010-12-21')

        # Parse a date in a valid format, get a parsed result
        result = f.clean('21.12.2010')
        self.assertEqual(result, date(2010, 12, 21))

        # The parsed result does a round trip to the same format
        text = f.widget.format_value(result)
        self.assertEqual(text, '21.12.2010')

        # Parse a date in a valid format, get a parsed result
        result = f.clean('21-12-2010')
        self.assertEqual(result, date(2010, 12, 21))

        # The parsed result does a round trip to default format
        text = f.widget.format_value(result)
        self.assertEqual(text, "21.12.2010")

    def test_dateField_with_inputformat(self):
        "DateFields with manually specified input formats can accept those formats"
        f = forms.DateField(input_formats=["%m.%d.%Y", "%m-%d-%Y"])
        # Parse a date in an unaccepted format; get an error
        with self.assertRaises(forms.ValidationError):
            f.clean('21.12.2010')
        with self.assertRaises(forms.ValidationError):
            f.clean('2010-12-21')

        # Parse a date in a valid format, get a parsed result
        result = f.clean('12.21.2010')
        self.assertEqual(result, date(2010, 12, 21))

        # The parsed result does a round trip to the same format
        text = f.widget.format_value(result)
        self.assertEqual(text, "21.12.2010")

        # Parse a date in a valid format, get a parsed result
        result = f.clean('12-21-2010')
        self.assertEqual(result, date(2010, 12, 21))

        # The parsed result does a round trip to default format
        text = f.widget.format_value(result)
        self.assertEqual(text, "21.12.2010")

    def test_localized_dateField_with_inputformat(self):
        "Localized DateFields with manually specified input formats can accept those formats"
        f = forms.DateField(input_formats=["%m.%d.%Y", "%m-%d-%Y"], localize=True)
        # Parse a date in an unaccepted format; get an error
        with self.assertRaises(forms.ValidationError):
            f.clean('21.12.2010')
        with self.assertRaises(forms.ValidationError):
            f.clean('2010-12-21')

        # Parse a date in a valid format, get a parsed result
        result = f.clean('12.21.2010')
        self.assertEqual(result, date(2010, 12, 21))

        # The parsed result does a round trip to the same format
        text = f.widget.format_value(result)
        self.assertEqual(text, "21.12.2010")

        # Parse a date in a valid format, get a parsed result
        result = f.clean('12-21-2010')
        self.assertEqual(result, date(2010, 12, 21))

        # The parsed result does a round trip to default format
        text = f.widget.format_value(result)
        self.assertEqual(text, "21.12.2010")


class SimpleDateFormatTests(SimpleTestCase):
    def test_dateField(self):
        "DateFields can parse dates in the default format"
        f = forms.DateField()
        # Parse a date in an unaccepted format; get an error
        with self.assertRaises(forms.ValidationError):
            f.clean('21.12.2010')

        # Parse a date in a valid format, get a parsed result
        result = f.clean('2010-12-21')
        self.assertEqual(result, date(2010, 12, 21))

        # The parsed result does a round trip to the same format
        text = f.widget.format_value(result)
        self.assertEqual(text, "2010-12-21")

        # Parse a date in a valid, but non-default format, get a parsed result
        result = f.clean('12/21/2010')
        self.assertEqual(result, date(2010, 12, 21))

        # The parsed result does a round trip to default format
        text = f.widget.format_value(result)
        self.assertEqual(text, "2010-12-21")

    def test_localized_dateField(self):
        "Localized DateFields in a non-localized environment act as unlocalized widgets"
        f = forms.DateField()
        # Parse a date in an unaccepted format; get an error
        with self.assertRaises(forms.ValidationError):
            f.clean('21.12.2010')

        # Parse a date in a valid format, get a parsed result
        result = f.clean('2010-12-21')
        self.assertEqual(result, date(2010, 12, 21))

        # The parsed result does a round trip to the same format
        text = f.widget.format_value(result)
        self.assertEqual(text, "2010-12-21")

        # Parse a date in a valid format, get a parsed result
        result = f.clean('12/21/2010')
        self.assertEqual(result, date(2010, 12, 21))

        # The parsed result does a round trip to default format
        text = f.widget.format_value(result)
        self.assertEqual(text, "2010-12-21")

    def test_dateField_with_inputformat(self):
        "DateFields with manually specified input formats can accept those formats"
        f = forms.DateField(input_formats=["%d.%m.%Y", "%d-%m-%Y"])
        # Parse a date in an unaccepted format; get an error
        with self.assertRaises(forms.ValidationError):
            f.clean('2010-12-21')

        # Parse a date in a valid format, get a parsed result
        result = f.clean('21.12.2010')
        self.assertEqual(result, date(2010, 12, 21))

        # The parsed result does a round trip to the same format
        text = f.widget.format_value(result)
        self.assertEqual(text, "2010-12-21")

        # Parse a date in a valid format, get a parsed result
        result = f.clean('21-12-2010')
        self.assertEqual(result, date(2010, 12, 21))

        # The parsed result does a round trip to default format
        text = f.widget.format_value(result)
        self.assertEqual(text, "2010-12-21")

    def test_localized_dateField_with_inputformat(self):
        "Localized DateFields with manually specified input formats can accept those formats"
        f = forms.DateField(input_formats=["%d.%m.%Y", "%d-%m-%Y"], localize=True)
        # Parse a date in an unaccepted format; get an error
        with self.assertRaises(forms.ValidationError):
            f.clean('2010-12-21')

        # Parse a date in a valid format, get a parsed result
        result = f.clean('21.12.2010')
        self.assertEqual(result, date(2010, 12, 21))

        # The parsed result does a round trip to the same format
        text = f.widget.format_value(result)
        self.assertEqual(text, "2010-12-21")

        # Parse a date in a valid format, get a parsed result
        result = f.clean('21-12-2010')
        self.assertEqual(result, date(2010, 12, 21))

        # The parsed result does a round trip to default format
        text = f.widget.format_value(result)
        self.assertEqual(text, "2010-12-21")
@override_settings(DATETIME_INPUT_FORMATS=["%I:%M:%S %p %d/%m/%Y", "%I:%M %p %d-%m-%Y"], USE_L10N=True)
class LocalizedDateTimeTests(SimpleTestCase):
    def setUp(self):
        activate('de')

    def tearDown(self):
        deactivate()

    def test_dateTimeField(self):
        "DateTimeFields can parse datetimes in the default format"
        f = forms.DateTimeField()
        # Parse a date in an unaccepted format; get an error
        with self.assertRaises(forms.ValidationError):
            f.clean('1:30:05 PM 21/12/2010')

        # ISO formats are accepted, even if not specified in formats.py
        self.assertEqual(f.clean('2010-12-21 13:30:05'), datetime(2010, 12, 21, 13, 30, 5))

        # Parse a date in a valid format, get a parsed result
        result = f.clean('21.12.2010 13:30:05')
        self.assertEqual(result, datetime(2010, 12, 21, 13, 30, 5))

        # The parsed result does a round trip
        text = f.widget.format_value(result)
        self.assertEqual(text, '21.12.2010 13:30:05')

        # Parse a date in a valid, but non-default format, get a parsed result
        result = f.clean('21.12.2010 13:30')
        self.assertEqual(result, datetime(2010, 12, 21, 13, 30))

        # The parsed result does a round trip to default format
        text = f.widget.format_value(result)
        self.assertEqual(text, "21.12.2010 13:30:00")

    def test_localized_dateTimeField(self):
        "Localized DateTimeFields act as unlocalized widgets"
        f = forms.DateTimeField(localize=True)
        # Parse a date in an unaccepted format; get an error
        with self.assertRaises(forms.ValidationError):
            f.clean('1:30:05 PM 21/12/2010')

        # Parse a date in a valid format, get a parsed result
        result = f.clean('21.12.2010 13:30:05')
        self.assertEqual(result, datetime(2010, 12, 21, 13, 30, 5))

        # The parsed result does a round trip to the same format
        text = f.widget.format_value(result)
        self.assertEqual(text, '21.12.2010 13:30:05')

        # Parse a date in a valid format, get a parsed result
        result = f.clean('21.12.2010 13:30')
        self.assertEqual(result, datetime(2010, 12, 21, 13, 30))

        # The parsed result does a round trip to default format
        text = f.widget.format_value(result)
        self.assertEqual(text, "21.12.2010 13:30:00")

    def test_dateTimeField_with_inputformat(self):
        "DateTimeFields with manually specified input formats can accept those formats"
        f = forms.DateTimeField(input_formats=["%H.%M.%S %m.%d.%Y", "%H.%M %m-%d-%Y"])
        # Parse a date in an unaccepted format; get an error
        with self.assertRaises(forms.ValidationError):
            f.clean('2010-12-21 13:30:05 13:30:05')
        with self.assertRaises(forms.ValidationError):
            f.clean('1:30:05 PM 21/12/2010')
        with self.assertRaises(forms.ValidationError):
            f.clean('13:30:05 21.12.2010')

        # Parse a date in a valid format, get a parsed result
        result = f.clean('13.30.05 12.21.2010')
        self.assertEqual(result, datetime(2010, 12, 21, 13, 30, 5))

        # The parsed result does a round trip to the same format
        text = f.widget.format_value(result)
        self.assertEqual(text, "21.12.2010 13:30:05")

        # Parse a date in a valid format, get a parsed result
        result = f.clean('13.30 12-21-2010')
        self.assertEqual(result, datetime(2010, 12, 21, 13, 30))

        # The parsed result does a round trip to default format
        text = f.widget.format_value(result)
        self.assertEqual(text, "21.12.2010 13:30:00")

    def test_localized_dateTimeField_with_inputformat(self):
        "Localized DateTimeFields with manually specified input formats can accept those formats"
        f = forms.DateTimeField(input_formats=["%H.%M.%S %m.%d.%Y", "%H.%M %m-%d-%Y"], localize=True)
        # Parse a date in an unaccepted format; get an error
        with self.assertRaises(forms.ValidationError):
            f.clean('2010/12/21 13:30:05')
        with self.assertRaises(forms.ValidationError):
            f.clean('1:30:05 PM 21/12/2010')
        with self.assertRaises(forms.ValidationError):
            f.clean('13:30:05 21.12.2010')

        # Parse a date in a valid format, get a parsed result
        result = f.clean('13.30.05 12.21.2010')
        self.assertEqual(datetime(2010, 12, 21, 13, 30, 5), result)
        # ISO format is always valid.
        self.assertEqual(
            f.clean('2010-12-21 13:30:05'),
            datetime(2010, 12, 21, 13, 30, 5),
        )

        # The parsed result does a round trip to the same format
        text = f.widget.format_value(result)
        self.assertEqual(text, "21.12.2010 13:30:05")

        # Parse a date in a valid format, get a parsed result
        result = f.clean('13.30 12-21-2010')
        self.assertEqual(result, datetime(2010, 12, 21, 13, 30))

        # The parsed result does a round trip to default format
        text = f.widget.format_value(result)
        self.assertEqual(text, "21.12.2010 13:30:00")


@override_settings(DATETIME_INPUT_FORMATS=["%I:%M:%S %p %d/%m/%Y", "%I:%M %p %d-%m-%Y"])
class CustomDateTimeInputFormatsTests(SimpleTestCase):
    def test_dateTimeField(self):
        "DateTimeFields can parse datetimes in the default format"
        f = forms.DateTimeField()
        # Parse a date in an unaccepted format; get an error
        with self.assertRaises(forms.ValidationError):
            f.clean('2010/12/21 13:30:05')

        # Parse a date in a valid format, get a parsed result
        result = f.clean('1:30:05 PM 21/12/2010')
        self.assertEqual(result, datetime(2010, 12, 21, 13, 30, 5))

        # The parsed result does a round trip
        text = f.widget.format_value(result)
        self.assertEqual(text, '01:30:05 PM 21/12/2010')

        # Parse a date in a valid, but non-default format, get a parsed result
        result = f.clean('1:30 PM 21-12-2010')
        self.assertEqual(result, datetime(2010, 12, 21, 13, 30))

        # The parsed result does a round trip to default format
        text = f.widget.format_value(result)
        self.assertEqual(text, "01:30:00 PM 21/12/2010")

    def test_localized_dateTimeField(self):
        "Localized DateTimeFields act as unlocalized widgets"
        f = forms.DateTimeField(localize=True)
        # Parse a date in an unaccepted format; get an error
        with self.assertRaises(forms.ValidationError):
            f.clean('2010/12/21 13:30:05')

        # Parse a date in a valid format, get a parsed result
        result = f.clean('1:30:05 PM 21/12/2010')
        self.assertEqual(result, datetime(2010, 12, 21, 13, 30, 5))

        # The parsed result does a round trip to the same format
        text = f.widget.format_value(result)
        self.assertEqual(text, '01:30:05 PM 21/12/2010')

        # Parse a date in a valid format, get a parsed result
        result = f.clean('1:30 PM 21-12-2010')
        self.assertEqual(result, datetime(2010, 12, 21, 13, 30))

        # The parsed result does a round trip to default format
        text = f.widget.format_value(result)
        self.assertEqual(text, "01:30:00 PM 21/12/2010")

    def test_dateTimeField_with_inputformat(self):
        "DateTimeFields with manually specified input formats can accept those formats"
        f = forms.DateTimeField(input_formats=["%m.%d.%Y %H:%M:%S", "%m-%d-%Y %H:%M"])
        # Parse a date in an unaccepted format; get an error
        with self.assertRaises(forms.ValidationError):
            f.clean('13:30:05 21.12.2010')
        with self.assertRaises(forms.ValidationError):
            f.clean('2010/12/21 13:30:05')

        # Parse a date in a valid format, get a parsed result
        result = f.clean('12.21.2010 13:30:05')
        self.assertEqual(result, datetime(2010, 12, 21, 13, 30, 5))

        # The parsed result does a round trip to the same format
        text = f.widget.format_value(result)
        self.assertEqual(text, "01:30:05 PM 21/12/2010")

        # Parse a date in a valid format, get a parsed result
        result = f.clean('12-21-2010 13:30')
        self.assertEqual(result, datetime(2010, 12, 21, 13, 30))

        # The parsed result does a round trip to default format
        text = f.widget.format_value(result)
        self.assertEqual(text, "01:30:00 PM 21/12/2010")

    def test_localized_dateTimeField_with_inputformat(self):
        "Localized DateTimeFields with manually specified input formats can accept those formats"
        f = forms.DateTimeField(input_formats=["%m.%d.%Y %H:%M:%S", "%m-%d-%Y %H:%M"], localize=True)
        # Parse a date in an unaccepted format; get an error
        with self.assertRaises(forms.ValidationError):
            f.clean('13:30:05 21.12.2010')
        with self.assertRaises(forms.ValidationError):
            f.clean('2010/12/21 13:30:05')

        # Parse a date in a valid format, get a parsed result
        result = f.clean('12.21.2010 13:30:05')
        self.assertEqual(result, datetime(2010, 12, 21, 13, 30, 5))

        # The parsed result does a round trip to the same format
        text = f.widget.format_value(result)
        self.assertEqual(text, "01:30:05 PM 21/12/2010")

        # Parse a date in a valid format, get a parsed result
        result = f.clean('12-21-2010 13:30')
        self.assertEqual(result, datetime(2010, 12, 21, 13, 30))

        # The parsed result does a round trip to default format
        text = f.widget.format_value(result)
        self.assertEqual(text, "01:30:00 PM 21/12/2010")


class SimpleDateTimeFormatTests(SimpleTestCase):
    def test_dateTimeField(self):
        "DateTimeFields can parse datetimes in the default format"
        f = forms.DateTimeField()
        # Parse a date in an unaccepted format; get an error
        with self.assertRaises(forms.ValidationError):
            f.clean('13:30:05 21.12.2010')

        # Parse a date in a valid format, get a parsed result
        result = f.clean('2010-12-21 13:30:05')
        self.assertEqual(result, datetime(2010, 12, 21, 13, 30, 5))

        # The parsed result does a round trip to the same format
        text = f.widget.format_value(result)
        self.assertEqual(text, "2010-12-21 13:30:05")

        # Parse a date in a valid, but non-default format, get a parsed result
        result = f.clean('12/21/2010 13:30:05')
        self.assertEqual(result, datetime(2010, 12, 21, 13, 30, 5))

        # The parsed result does a round trip to default format
        text = f.widget.format_value(result)
        self.assertEqual(text, "2010-12-21 13:30:05")

    def test_localized_dateTimeField(self):
        "Localized DateTimeFields in a non-localized environment act as unlocalized widgets"
        f = forms.DateTimeField()
        # Parse a date in an unaccepted format; get an error
        with self.assertRaises(forms.ValidationError):
            f.clean('13:30:05 21.12.2010')

        # Parse a date in a valid format, get a parsed result
        result = f.clean('2010-12-21 13:30:05')
        self.assertEqual(result, datetime(2010, 12, 21, 13, 30, 5))

        # The parsed result does a round trip to the same format
        text = f.widget.format_value(result)
        self.assertEqual(text, "2010-12-21 13:30:05")

        # Parse a date in a valid format, get a parsed result
        result = f.clean('12/21/2010 13:30:05')
        self.assertEqual(result, datetime(2010, 12, 21, 13, 30, 5))

        # The parsed result does a round trip to default format
        text = f.widget.format_value(result)
        self.assertEqual(text, "2010-12-21 13:30:05")

    def test_dateTimeField_with_inputformat(self):
        "DateTimeFields with manually specified input formats can accept those formats"
        f = forms.DateTimeField(input_formats=["%I:%M:%S %p %d.%m.%Y", "%I:%M %p %d-%m-%Y"])
        # Parse a date in an unaccepted format; get an error
        with self.assertRaises(forms.ValidationError):
            f.clean('2010/12/21 13:30:05')

        # Parse a date in a valid format, get a parsed result
        result = f.clean('1:30:05 PM 21.12.2010')
        self.assertEqual(result, datetime(2010, 12, 21, 13, 30, 5))

        # The parsed result does a round trip to the same format
        text = f.widget.format_value(result)
        self.assertEqual(text, "2010-12-21 13:30:05")

        # Parse a date in a valid format, get a parsed result
        result = f.clean('1:30 PM 21-12-2010')
        self.assertEqual(result, datetime(2010, 12, 21, 13, 30))

        # The parsed result does a round trip to default format
        text = f.widget.format_value(result)
        self.assertEqual(text, "2010-12-21 13:30:00")

    def test_localized_dateTimeField_with_inputformat(self):
        "Localized DateTimeFields with manually specified input formats can accept those formats"
        f = forms.DateTimeField(input_formats=["%I:%M:%S %p %d.%m.%Y", "%I:%M %p %d-%m-%Y"], localize=True)
        # Parse a date in an unaccepted format; get an error
        with self.assertRaises(forms.ValidationError):
            f.clean('2010/12/21 13:30:05')

        # Parse a date in a valid format, get a parsed result
        result = f.clean('1:30:05 PM 21.12.2010')
        self.assertEqual(result, datetime(2010, 12, 21, 13, 30, 5))

        # The parsed result does a round trip to the same format
        text = f.widget.format_value(result)
        self.assertEqual(text, "2010-12-21 13:30:05")

        # Parse a date in a valid format, get a parsed result
        result = f.clean('1:30 PM 21-12-2010')
        self.assertEqual(result, datetime(2010, 12, 21, 13, 30))

        # The parsed result does a round trip to default format
        text = f.widget.format_value(result)
        self.assertEqual(text, "2010-12-21 13:30:00")
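# Editor's note -- illustrative sketch, not part of the original test module.
# A recurring assertion above is that ISO 8601 input is accepted even when it
# does not appear in the configured format list ("ISO format is always
# valid"). Mirroring the DateTimeField case from the tests, and assuming a
# configured settings module:
#
#   from django import forms
#
#   f = forms.DateTimeField(input_formats=["%H.%M.%S %m.%d.%Y"])
#   f.clean("2010-12-21 13:30:05")  # datetime(2010, 12, 21, 13, 30, 5)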
3aec4bb0bcbf59c9f05e1976bfceed6367b3a22b90a973878726ce0f64d052c9
import copy
import datetime
import json
import uuid

from django.core.exceptions import NON_FIELD_ERRORS
from django.core.files.uploadedfile import SimpleUploadedFile
from django.core.validators import MaxValueValidator, RegexValidator
from django.forms import (
    BooleanField, CharField, CheckboxSelectMultiple, ChoiceField, DateField,
    DateTimeField, EmailField, FileField, FileInput, FloatField, Form,
    HiddenInput, ImageField, IntegerField, MultipleChoiceField,
    MultipleHiddenInput, MultiValueField, NullBooleanField, PasswordInput,
    RadioSelect, Select, SplitDateTimeField, SplitHiddenDateTimeWidget,
    Textarea, TextInput, TimeField, ValidationError, forms,
)
from django.forms.renderers import DjangoTemplates, get_default_renderer
from django.forms.utils import ErrorList
from django.http import QueryDict
from django.template import Context, Template
from django.test import SimpleTestCase
from django.utils.datastructures import MultiValueDict
from django.utils.safestring import mark_safe


class Person(Form):
    first_name = CharField()
    last_name = CharField()
    birthday = DateField()


class PersonNew(Form):
    first_name = CharField(widget=TextInput(attrs={'id': 'first_name_id'}))
    last_name = CharField()
    birthday = DateField()


class MultiValueDictLike(dict):
    def getlist(self, key):
        return [self[key]]


class FormsTestCase(SimpleTestCase):
    # A Form is a collection of Fields. It knows how to validate a set of data and it
    # knows how to render itself in a couple of default ways (e.g., an HTML table).
    # You can pass it data in __init__(), as a dictionary.

    def test_form(self):
        # Pass a dictionary to a Form's __init__().
        p = Person({'first_name': 'John', 'last_name': 'Lennon', 'birthday': '1940-10-9'})

        self.assertTrue(p.is_bound)
        self.assertEqual(p.errors, {})
        self.assertTrue(p.is_valid())
        self.assertHTMLEqual(p.errors.as_ul(), '')
        self.assertEqual(p.errors.as_text(), '')
        self.assertEqual(p.cleaned_data["first_name"], 'John')
        self.assertEqual(p.cleaned_data["last_name"], 'Lennon')
        self.assertEqual(p.cleaned_data["birthday"], datetime.date(1940, 10, 9))
        self.assertHTMLEqual(
            str(p['first_name']),
            '<input type="text" name="first_name" value="John" id="id_first_name" required>'
        )
        self.assertHTMLEqual(
            str(p['last_name']),
            '<input type="text" name="last_name" value="Lennon" id="id_last_name" required>'
        )
        self.assertHTMLEqual(
            str(p['birthday']),
            '<input type="text" name="birthday" value="1940-10-9" id="id_birthday" required>'
        )
        msg = "Key 'nonexistentfield' not found in 'Person'. Choices are: birthday, first_name, last_name."
        with self.assertRaisesMessage(KeyError, msg):
            p['nonexistentfield']

        form_output = []

        for boundfield in p:
            form_output.append(str(boundfield))

        self.assertHTMLEqual(
            '\n'.join(form_output),
            """<input type="text" name="first_name" value="John" id="id_first_name" required>
<input type="text" name="last_name" value="Lennon" id="id_last_name" required>
<input type="text" name="birthday" value="1940-10-9" id="id_birthday" required>"""
        )

        form_output = []

        for boundfield in p:
            form_output.append([boundfield.label, boundfield.data])

        self.assertEqual(form_output, [
            ['First name', 'John'],
            ['Last name', 'Lennon'],
            ['Birthday', '1940-10-9']
        ])
        self.assertHTMLEqual(
            str(p),
            """<tr><th><label for="id_first_name">First name:</label></th><td>
<input type="text" name="first_name" value="John" id="id_first_name" required></td></tr>
<tr><th><label for="id_last_name">Last name:</label></th><td>
<input type="text" name="last_name" value="Lennon" id="id_last_name" required></td></tr>
<tr><th><label for="id_birthday">Birthday:</label></th><td>
<input type="text" name="birthday" value="1940-10-9" id="id_birthday" required></td></tr>"""
        )

    def test_empty_dict(self):
        # Empty dictionaries are valid, too.
        p = Person({})
        self.assertTrue(p.is_bound)
        self.assertEqual(p.errors['first_name'], ['This field is required.'])
        self.assertEqual(p.errors['last_name'], ['This field is required.'])
        self.assertEqual(p.errors['birthday'], ['This field is required.'])
        self.assertFalse(p.is_valid())
        self.assertEqual(p.cleaned_data, {})
        self.assertHTMLEqual(
            str(p),
            """<tr><th><label for="id_first_name">First name:</label></th><td>
<ul class="errorlist"><li>This field is required.</li></ul>
<input type="text" name="first_name" id="id_first_name" required></td></tr>
<tr><th><label for="id_last_name">Last name:</label></th>
<td><ul class="errorlist"><li>This field is required.</li></ul>
<input type="text" name="last_name" id="id_last_name" required></td></tr>
<tr><th><label for="id_birthday">Birthday:</label></th><td>
<ul class="errorlist"><li>This field is required.</li></ul>
<input type="text" name="birthday" id="id_birthday" required></td></tr>"""
        )
        self.assertHTMLEqual(
            p.as_table(),
            """<tr><th><label for="id_first_name">First name:</label></th><td>
<ul class="errorlist"><li>This field is required.</li></ul>
<input type="text" name="first_name" id="id_first_name" required></td></tr>
<tr><th><label for="id_last_name">Last name:</label></th>
<td><ul class="errorlist"><li>This field is required.</li></ul>
<input type="text" name="last_name" id="id_last_name" required></td></tr>
<tr><th><label for="id_birthday">Birthday:</label></th>
<td><ul class="errorlist"><li>This field is required.</li></ul>
<input type="text" name="birthday" id="id_birthday" required></td></tr>"""
        )
        self.assertHTMLEqual(
            p.as_ul(),
            """<li><ul class="errorlist"><li>This field is required.</li></ul>
<label for="id_first_name">First name:</label>
<input type="text" name="first_name" id="id_first_name" required></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>
<label for="id_last_name">Last name:</label>
<input type="text" name="last_name" id="id_last_name" required></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>
<label for="id_birthday">Birthday:</label>
<input type="text" name="birthday" id="id_birthday" required></li>"""
        )
        self.assertHTMLEqual(
            p.as_p(),
            """<ul class="errorlist"><li>This field is required.</li></ul>
<p><label for="id_first_name">First name:</label>
<input type="text" name="first_name" id="id_first_name" required></p>
<ul class="errorlist"><li>This field is required.</li></ul>
<p><label for="id_last_name">Last name:</label>
<input type="text" name="last_name" id="id_last_name" required></p>
<ul class="errorlist"><li>This field is required.</li></ul>
<p><label for="id_birthday">Birthday:</label>
<input type="text" name="birthday" id="id_birthday" required></p>"""
        )

    def test_empty_querydict_args(self):
        data = QueryDict()
        files = QueryDict()
        p = Person(data, files)
        self.assertIs(p.data, data)
        self.assertIs(p.files, files)

    def test_unbound_form(self):
        # If you don't pass any values to the Form's __init__(), or if you pass None,
        # the Form will be considered unbound and won't do any validation. Form.errors
        # will be an empty dictionary *but* Form.is_valid() will return False.
        p = Person()
        self.assertFalse(p.is_bound)
        self.assertEqual(p.errors, {})
        self.assertFalse(p.is_valid())
        with self.assertRaises(AttributeError):
            p.cleaned_data

        self.assertHTMLEqual(
            str(p),
            """<tr><th><label for="id_first_name">First name:</label></th><td>
<input type="text" name="first_name" id="id_first_name" required></td></tr>
<tr><th><label for="id_last_name">Last name:</label></th><td>
<input type="text" name="last_name" id="id_last_name" required></td></tr>
<tr><th><label for="id_birthday">Birthday:</label></th><td>
<input type="text" name="birthday" id="id_birthday" required></td></tr>"""
        )
        self.assertHTMLEqual(
            p.as_table(),
            """<tr><th><label for="id_first_name">First name:</label></th><td>
<input type="text" name="first_name" id="id_first_name" required></td></tr>
<tr><th><label for="id_last_name">Last name:</label></th><td>
<input type="text" name="last_name" id="id_last_name" required></td></tr>
<tr><th><label for="id_birthday">Birthday:</label></th><td>
<input type="text" name="birthday" id="id_birthday" required></td></tr>"""
        )
        self.assertHTMLEqual(
            p.as_ul(),
            """<li><label for="id_first_name">First name:</label>
<input type="text" name="first_name" id="id_first_name" required></li>
<li><label for="id_last_name">Last name:</label>
<input type="text" name="last_name" id="id_last_name" required></li>
<li><label for="id_birthday">Birthday:</label>
<input type="text" name="birthday" id="id_birthday" required></li>"""
        )
        self.assertHTMLEqual(
            p.as_p(),
            """<p><label for="id_first_name">First name:</label>
<input type="text" name="first_name" id="id_first_name" required></p>
<p><label for="id_last_name">Last name:</label>
<input type="text" name="last_name" id="id_last_name" required></p>
<p><label for="id_birthday">Birthday:</label>
<input type="text" name="birthday" id="id_birthday" required></p>"""
        )

    def test_unicode_values(self):
        # Unicode values are handled properly.
        p = Person({
            'first_name': 'John',
            'last_name': '\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111',
            'birthday': '1940-10-9'
        })
        self.assertHTMLEqual(
            p.as_table(),
            '<tr><th><label for="id_first_name">First name:</label></th><td>'
            '<input type="text" name="first_name" value="John" id="id_first_name" required></td></tr>\n'
            '<tr><th><label for="id_last_name">Last name:</label>'
            '</th><td><input type="text" name="last_name" '
            'value="\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111"'
            'id="id_last_name" required></td></tr>\n'
            '<tr><th><label for="id_birthday">Birthday:</label></th><td>'
            '<input type="text" name="birthday" value="1940-10-9" id="id_birthday" required></td></tr>'
        )
        self.assertHTMLEqual(
            p.as_ul(),
            '<li><label for="id_first_name">First name:</label> '
            '<input type="text" name="first_name" value="John" id="id_first_name" required></li>\n'
            '<li><label for="id_last_name">Last name:</label> '
            '<input type="text" name="last_name" '
            'value="\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111" id="id_last_name" required></li>\n'
            '<li><label for="id_birthday">Birthday:</label> '
            '<input type="text" name="birthday" value="1940-10-9" id="id_birthday" required></li>'
        )
        self.assertHTMLEqual(
            p.as_p(),
            '<p><label for="id_first_name">First name:</label> '
            '<input type="text" name="first_name" value="John" id="id_first_name" required></p>\n'
            '<p><label for="id_last_name">Last name:</label> '
            '<input type="text" name="last_name" '
            'value="\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111" id="id_last_name" required></p>\n'
            '<p><label for="id_birthday">Birthday:</label> '
            '<input type="text" name="birthday" value="1940-10-9" id="id_birthday" required></p>'
        )

        p = Person({'last_name': 'Lennon'})
        self.assertEqual(p.errors['first_name'], ['This field is required.'])
        self.assertEqual(p.errors['birthday'], ['This field is required.'])
        self.assertFalse(p.is_valid())
        self.assertEqual(
            p.errors,
            {'birthday': ['This field is required.'], 'first_name': ['This field is required.']}
        )
        self.assertEqual(p.cleaned_data, {'last_name': 'Lennon'})
        self.assertEqual(p['first_name'].errors, ['This field is required.'])
        self.assertHTMLEqual(
            p['first_name'].errors.as_ul(),
            '<ul class="errorlist"><li>This field is required.</li></ul>'
        )
        self.assertEqual(p['first_name'].errors.as_text(), '* This field is required.')

        p = Person()
        self.assertHTMLEqual(
            str(p['first_name']),
            '<input type="text" name="first_name" id="id_first_name" required>',
        )
        self.assertHTMLEqual(str(p['last_name']), '<input type="text" name="last_name" id="id_last_name" required>')
        self.assertHTMLEqual(str(p['birthday']), '<input type="text" name="birthday" id="id_birthday" required>')

    def test_cleaned_data_only_fields(self):
        # cleaned_data will always *only* contain a key for fields defined in the
        # Form, even if you pass extra data when you define the Form. In this
        # example, we pass a bunch of extra fields to the form constructor,
        # but cleaned_data contains only the form's fields.
        data = {
            'first_name': 'John',
            'last_name': 'Lennon',
            'birthday': '1940-10-9',
            'extra1': 'hello',
            'extra2': 'hello',
        }
        p = Person(data)
        self.assertTrue(p.is_valid())
        self.assertEqual(p.cleaned_data['first_name'], 'John')
        self.assertEqual(p.cleaned_data['last_name'], 'Lennon')
        self.assertEqual(p.cleaned_data['birthday'], datetime.date(1940, 10, 9))

    def test_optional_data(self):
        # cleaned_data will include a key and value for *all* fields defined in the Form,
        # even if the Form's data didn't include a value for fields that are not
        # required. In this example, the data dictionary doesn't include a value for the
        # "nick_name" field, but cleaned_data includes it. For CharFields, it's set to the
        # empty string.
        class OptionalPersonForm(Form):
            first_name = CharField()
            last_name = CharField()
            nick_name = CharField(required=False)

        data = {'first_name': 'John', 'last_name': 'Lennon'}
        f = OptionalPersonForm(data)
        self.assertTrue(f.is_valid())
        self.assertEqual(f.cleaned_data['nick_name'], '')
        self.assertEqual(f.cleaned_data['first_name'], 'John')
        self.assertEqual(f.cleaned_data['last_name'], 'Lennon')

        # For DateFields, it's set to None.
        class OptionalPersonForm(Form):
            first_name = CharField()
            last_name = CharField()
            birth_date = DateField(required=False)

        data = {'first_name': 'John', 'last_name': 'Lennon'}
        f = OptionalPersonForm(data)
        self.assertTrue(f.is_valid())
        self.assertIsNone(f.cleaned_data['birth_date'])
        self.assertEqual(f.cleaned_data['first_name'], 'John')
        self.assertEqual(f.cleaned_data['last_name'], 'Lennon')

    def test_auto_id(self):
        # "auto_id" tells the Form to add an "id" attribute to each form element.
        # If it's a string that contains '%s', Django will use that as a format string
        # into which the field's name will be inserted. It will also put a <label> around
        # the human-readable labels for a field.
        p = Person(auto_id='%s_id')
        self.assertHTMLEqual(
            p.as_table(),
            """<tr><th><label for="first_name_id">First name:</label></th><td>
<input type="text" name="first_name" id="first_name_id" required></td></tr>
<tr><th><label for="last_name_id">Last name:</label></th><td>
<input type="text" name="last_name" id="last_name_id" required></td></tr>
<tr><th><label for="birthday_id">Birthday:</label></th><td>
<input type="text" name="birthday" id="birthday_id" required></td></tr>"""
        )
        self.assertHTMLEqual(
            p.as_ul(),
            """<li><label for="first_name_id">First name:</label>
<input type="text" name="first_name" id="first_name_id" required></li>
<li><label for="last_name_id">Last name:</label>
<input type="text" name="last_name" id="last_name_id" required></li>
<li><label for="birthday_id">Birthday:</label>
<input type="text" name="birthday" id="birthday_id" required></li>"""
        )
        self.assertHTMLEqual(
            p.as_p(),
            """<p><label for="first_name_id">First name:</label>
<input type="text" name="first_name" id="first_name_id" required></p>
<p><label for="last_name_id">Last name:</label>
<input type="text" name="last_name" id="last_name_id" required></p>
<p><label for="birthday_id">Birthday:</label>
<input type="text" name="birthday" id="birthday_id" required></p>"""
        )

    def test_auto_id_true(self):
        # If auto_id is any True value whose str() does not contain '%s', the "id"
        # attribute will be the name of the field.
        p = Person(auto_id=True)
        self.assertHTMLEqual(
            p.as_ul(),
            """<li><label for="first_name">First name:</label>
<input type="text" name="first_name" id="first_name" required></li>
<li><label for="last_name">Last name:</label>
<input type="text" name="last_name" id="last_name" required></li>
<li><label for="birthday">Birthday:</label>
<input type="text" name="birthday" id="birthday" required></li>"""
        )

    def test_auto_id_false(self):
        # If auto_id is any False value, an "id" attribute won't be output unless it
        # was manually entered.
        p = Person(auto_id=False)
        self.assertHTMLEqual(
            p.as_ul(),
            """<li>First name: <input type="text" name="first_name" required></li>
<li>Last name: <input type="text" name="last_name" required></li>
<li>Birthday: <input type="text" name="birthday" required></li>"""
        )

    def test_id_on_field(self):
        # In this example, auto_id is False, but the "id" attribute for the "first_name"
        # field is given. Also note that field gets a <label>, while the others don't.
        p = PersonNew(auto_id=False)
        self.assertHTMLEqual(
            p.as_ul(),
            """<li><label for="first_name_id">First name:</label>
<input type="text" id="first_name_id" name="first_name" required></li>
<li>Last name: <input type="text" name="last_name" required></li>
<li>Birthday: <input type="text" name="birthday" required></li>"""
        )

    def test_auto_id_on_form_and_field(self):
        # If the "id" attribute is specified in the Form and auto_id is True, the "id"
        # attribute in the Form gets precedence.
        p = PersonNew(auto_id=True)
        self.assertHTMLEqual(
            p.as_ul(),
            """<li><label for="first_name_id">First name:</label>
<input type="text" id="first_name_id" name="first_name" required></li>
<li><label for="last_name">Last name:</label>
<input type="text" name="last_name" id="last_name" required></li>
<li><label for="birthday">Birthday:</label>
<input type="text" name="birthday" id="birthday" required></li>"""
        )

    def test_various_boolean_values(self):
        class SignupForm(Form):
            email = EmailField()
            get_spam = BooleanField()

        f = SignupForm(auto_id=False)
        self.assertHTMLEqual(str(f['email']), '<input type="email" name="email" required>')
        self.assertHTMLEqual(str(f['get_spam']), '<input type="checkbox" name="get_spam" required>')

        f = SignupForm({'email': 'test@example.com', 'get_spam': True}, auto_id=False)
        self.assertHTMLEqual(str(f['email']), '<input type="email" name="email" value="test@example.com" required>')
        self.assertHTMLEqual(
            str(f['get_spam']),
            '<input checked type="checkbox" name="get_spam" required>',
        )
        # 'True' or 'true' should be rendered without a value attribute
        f = SignupForm({'email': 'test@example.com', 'get_spam': 'True'}, auto_id=False)
        self.assertHTMLEqual(
            str(f['get_spam']),
            '<input checked type="checkbox" name="get_spam" required>',
        )
        f = SignupForm({'email': 'test@example.com', 'get_spam': 'true'}, auto_id=False)
        self.assertHTMLEqual(
            str(f['get_spam']),
            '<input checked type="checkbox" name="get_spam" required>')

        # A value of 'False' or 'false' should be rendered unchecked
        f = SignupForm({'email': 'test@example.com', 'get_spam': 'False'}, auto_id=False)
        self.assertHTMLEqual(str(f['get_spam']), '<input type="checkbox" name="get_spam" required>')
        f = SignupForm({'email': 'test@example.com', 'get_spam': 'false'}, auto_id=False)
        self.assertHTMLEqual(str(f['get_spam']), '<input type="checkbox" name="get_spam" required>')

        # A value of '0' should be interpreted as a True value (#16820)
        f = SignupForm({'email': 'test@example.com', 'get_spam': '0'})
        self.assertTrue(f.is_valid())
        self.assertTrue(f.cleaned_data.get('get_spam'))
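    # Editor's note -- illustrative summary, not part of the original suite.
    # The checkbox assertions above pin down BooleanField's handling of raw
    # form data: a missing key renders unchecked, the strings 'True'/'true'
    # render checked without a value attribute, 'False'/'false' render
    # unchecked, and any other non-empty string such as '0' counts as a True
    # value (#16820).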
    def test_widget_output(self):
        # Any Field can have a Widget class passed to its constructor:
        class ContactForm(Form):
            subject = CharField()
            message = CharField(widget=Textarea)

        f = ContactForm(auto_id=False)
        self.assertHTMLEqual(str(f['subject']), '<input type="text" name="subject" required>')
        self.assertHTMLEqual(str(f['message']), '<textarea name="message" rows="10" cols="40" required></textarea>')

        # as_textarea(), as_text() and as_hidden() are shortcuts for changing the output
        # widget type:
        self.assertHTMLEqual(
            f['subject'].as_textarea(),
            '<textarea name="subject" rows="10" cols="40" required></textarea>',
        )
        self.assertHTMLEqual(f['message'].as_text(), '<input type="text" name="message" required>')
        self.assertHTMLEqual(f['message'].as_hidden(), '<input type="hidden" name="message">')

        # The 'widget' parameter to a Field can also be an instance:
        class ContactForm(Form):
            subject = CharField()
            message = CharField(widget=Textarea(attrs={'rows': 80, 'cols': 20}))

        f = ContactForm(auto_id=False)
        self.assertHTMLEqual(str(f['message']), '<textarea name="message" rows="80" cols="20" required></textarea>')

        # Instance-level attrs are *not* carried over to as_textarea(), as_text() and
        # as_hidden():
        self.assertHTMLEqual(f['message'].as_text(), '<input type="text" name="message" required>')
        f = ContactForm({'subject': 'Hello', 'message': 'I love you.'}, auto_id=False)
        self.assertHTMLEqual(
            f['subject'].as_textarea(),
            '<textarea rows="10" cols="40" name="subject" required>Hello</textarea>'
        )
        self.assertHTMLEqual(
            f['message'].as_text(),
            '<input type="text" name="message" value="I love you." required>',
        )
        self.assertHTMLEqual(f['message'].as_hidden(), '<input type="hidden" name="message" value="I love you.">')

    def test_forms_with_choices(self):
        # For a form with a <select>, use ChoiceField:
        class FrameworkForm(Form):
            name = CharField()
            language = ChoiceField(choices=[('P', 'Python'), ('J', 'Java')])

        f = FrameworkForm(auto_id=False)
        self.assertHTMLEqual(str(f['language']), """<select name="language">
<option value="P">Python</option>
<option value="J">Java</option>
</select>""")
        f = FrameworkForm({'name': 'Django', 'language': 'P'}, auto_id=False)
        self.assertHTMLEqual(str(f['language']), """<select name="language">
<option value="P" selected>Python</option>
<option value="J">Java</option>
</select>""")

        # A subtlety: If one of the choices' value is the empty string and the form is
        # unbound, then the <option> for the empty-string choice will get selected.
        class FrameworkForm(Form):
            name = CharField()
            language = ChoiceField(choices=[('', '------'), ('P', 'Python'), ('J', 'Java')])

        f = FrameworkForm(auto_id=False)
        self.assertHTMLEqual(str(f['language']), """<select name="language" required>
<option value="" selected>------</option>
<option value="P">Python</option>
<option value="J">Java</option>
</select>""")

        # You can specify widget attributes in the Widget constructor.
        class FrameworkForm(Form):
            name = CharField()
            language = ChoiceField(choices=[('P', 'Python'), ('J', 'Java')], widget=Select(attrs={'class': 'foo'}))

        f = FrameworkForm(auto_id=False)
        self.assertHTMLEqual(str(f['language']), """<select class="foo" name="language">
<option value="P">Python</option>
<option value="J">Java</option>
</select>""")
        f = FrameworkForm({'name': 'Django', 'language': 'P'}, auto_id=False)
        self.assertHTMLEqual(str(f['language']), """<select class="foo" name="language">
<option value="P" selected>Python</option>
<option value="J">Java</option>
</select>""")

        # When passing a custom widget instance to ChoiceField, note that setting
        # 'choices' on the widget is meaningless. The widget will use the choices
        # defined on the Field, not the ones defined on the Widget.
        class FrameworkForm(Form):
            name = CharField()
            language = ChoiceField(
                choices=[('P', 'Python'), ('J', 'Java')],
                widget=Select(choices=[('R', 'Ruby'), ('P', 'Perl')], attrs={'class': 'foo'}),
            )

        f = FrameworkForm(auto_id=False)
        self.assertHTMLEqual(str(f['language']), """<select class="foo" name="language">
<option value="P">Python</option>
<option value="J">Java</option>
</select>""")
        f = FrameworkForm({'name': 'Django', 'language': 'P'}, auto_id=False)
        self.assertHTMLEqual(str(f['language']), """<select class="foo" name="language">
<option value="P" selected>Python</option>
<option value="J">Java</option>
</select>""")

        # You can set a ChoiceField's choices after the fact.
        class FrameworkForm(Form):
            name = CharField()
            language = ChoiceField()

        f = FrameworkForm(auto_id=False)
        self.assertHTMLEqual(str(f['language']), """<select name="language">
</select>""")
        f.fields['language'].choices = [('P', 'Python'), ('J', 'Java')]
        self.assertHTMLEqual(str(f['language']), """<select name="language">
<option value="P">Python</option>
<option value="J">Java</option>
</select>""")

    def test_forms_with_radio(self):
        # Add widget=RadioSelect to use that widget with a ChoiceField.
        class FrameworkForm(Form):
            name = CharField()
            language = ChoiceField(choices=[('P', 'Python'), ('J', 'Java')], widget=RadioSelect)

        f = FrameworkForm(auto_id=False)
        self.assertHTMLEqual(str(f['language']), """<ul>
<li><label><input type="radio" name="language" value="P" required> Python</label></li>
<li><label><input type="radio" name="language" value="J" required> Java</label></li>
</ul>""")
        self.assertHTMLEqual(f.as_table(), """<tr><th>Name:</th><td><input type="text" name="name" required></td></tr>
<tr><th>Language:</th><td><ul>
<li><label><input type="radio" name="language" value="P" required> Python</label></li>
<li><label><input type="radio" name="language" value="J" required> Java</label></li>
</ul></td></tr>""")
        self.assertHTMLEqual(f.as_ul(), """<li>Name: <input type="text" name="name" required></li>
<li>Language: <ul>
<li><label><input type="radio" name="language" value="P" required> Python</label></li>
<li><label><input type="radio" name="language" value="J" required> Java</label></li>
</ul></li>""")

        # Regarding auto_id and <label>, RadioSelect is a special case. Each radio button
        # gets a distinct ID, formed by appending an underscore plus the button's
        # zero-based index.
        f = FrameworkForm(auto_id='id_%s')
        self.assertHTMLEqual(
            str(f['language']),
            """<ul id="id_language">
<li><label for="id_language_0"><input type="radio" id="id_language_0" value="P" name="language" required>
Python</label></li>
<li><label for="id_language_1"><input type="radio" id="id_language_1" value="J" name="language" required>
Java</label></li>
</ul>"""
        )

        # When RadioSelect is used with auto_id, and the whole form is printed using
        # either as_table() or as_ul(), the label for the RadioSelect will point to the
        # ID of the *first* radio button.
        self.assertHTMLEqual(
            f.as_table(),
            """<tr><th><label for="id_name">Name:</label></th><td><input type="text" name="name" id="id_name" required></td></tr>
<tr><th><label for="id_language_0">Language:</label></th><td><ul id="id_language">
<li><label for="id_language_0"><input type="radio" id="id_language_0" value="P" name="language" required>
Python</label></li>
<li><label for="id_language_1"><input type="radio" id="id_language_1" value="J" name="language" required>
Java</label></li>
</ul></td></tr>"""
        )
        self.assertHTMLEqual(
            f.as_ul(),
            """<li><label for="id_name">Name:</label> <input type="text" name="name" id="id_name" required></li>
<li><label for="id_language_0">Language:</label> <ul id="id_language">
<li><label for="id_language_0"><input type="radio" id="id_language_0" value="P" name="language" required>
Python</label></li>
<li><label for="id_language_1"><input type="radio" id="id_language_1" value="J" name="language" required>
Java</label></li>
</ul></li>"""
        )
        self.assertHTMLEqual(
            f.as_p(),
            """<p><label for="id_name">Name:</label> <input type="text" name="name" id="id_name" required></p>
<p><label for="id_language_0">Language:</label> <ul id="id_language">
<li><label for="id_language_0"><input type="radio" id="id_language_0" value="P" name="language" required>
Python</label></li>
<li><label for="id_language_1"><input type="radio" id="id_language_1" value="J" name="language" required>
Java</label></li>
</ul></p>"""
        )

        # Test iterating on individual radios in a template
        t = Template('{% for radio in form.language %}<div class="myradio">{{ radio }}</div>{% endfor %}')
        self.assertHTMLEqual(
            t.render(Context({'form': f})),
            """<div class="myradio"><label for="id_language_0">
<input id="id_language_0" name="language" type="radio" value="P" required> Python</label></div>
<div class="myradio"><label for="id_language_1">
<input id="id_language_1" name="language" type="radio" value="J" required> Java</label></div>"""
        )

    def test_form_with_iterable_boundfield(self):
        class BeatleForm(Form):
            name = ChoiceField(
                choices=[('john', 'John'), ('paul', 'Paul'), ('george', 'George'), ('ringo', 'Ringo')],
                widget=RadioSelect,
            )

        f = BeatleForm(auto_id=False)
        self.assertHTMLEqual(
            '\n'.join(str(bf) for bf in f['name']),
            """<label><input type="radio" name="name" value="john" required> John</label>
<label><input type="radio" name="name" value="paul" required> Paul</label>
<label><input type="radio" name="name" value="george" required> George</label>
<label><input type="radio" name="name" value="ringo" required> Ringo</label>"""
        )
        self.assertHTMLEqual(
            '\n'.join('<div>%s</div>' % bf for bf in f['name']),
            """<div><label><input type="radio" name="name" value="john" required> John</label></div>
<div><label><input type="radio" name="name" value="paul" required> Paul</label></div>
<div><label><input type="radio" name="name" value="george" required> George</label></div>
<div><label><input type="radio" name="name" value="ringo" required> Ringo</label></div>"""
        )
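    # Editor's note -- illustrative, not part of the original suite. Both the
    # template loop in test_forms_with_radio and the joins above rely on the
    # same protocol: iterating a BoundField yields one renderable subwidget
    # per choice. The equivalent plain-Python form, assuming the BeatleForm
    # defined above:
    #
    #   f = BeatleForm()
    #   for radio in f['name']:
    #       print(radio.id_for_label, radio.choice_label)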
name="name" ' 'value="john" id="id_name_0" required> John</label>' ) self.assertEqual(fields[1].id_for_label, 'id_name_1') self.assertEqual(fields[1].choice_label, 'Paul') self.assertHTMLEqual( fields[1].tag(), '<input type="radio" name="name" value="paul" id="id_name_1" required>' ) self.assertHTMLEqual( str(fields[1]), '<label for="id_name_1"><input type="radio" name="name" ' 'value="paul" id="id_name_1" required> Paul</label>' ) def test_iterable_boundfield_select(self): class BeatleForm(Form): name = ChoiceField(choices=[('john', 'John'), ('paul', 'Paul'), ('george', 'George'), ('ringo', 'Ringo')]) fields = list(BeatleForm(auto_id=False)['name']) self.assertEqual(len(fields), 4) self.assertEqual(fields[0].id_for_label, 'id_name_0') self.assertEqual(fields[0].choice_label, 'John') self.assertHTMLEqual(fields[0].tag(), '<option value="john">John</option>') self.assertHTMLEqual(str(fields[0]), '<option value="john">John</option>') def test_form_with_noniterable_boundfield(self): # You can iterate over any BoundField, not just those with widget=RadioSelect. class BeatleForm(Form): name = CharField() f = BeatleForm(auto_id=False) self.assertHTMLEqual('\n'.join(str(bf) for bf in f['name']), '<input type="text" name="name" required>') def test_boundfield_slice(self): class BeatleForm(Form): name = ChoiceField( choices=[('john', 'John'), ('paul', 'Paul'), ('george', 'George'), ('ringo', 'Ringo')], widget=RadioSelect, ) f = BeatleForm() bf = f['name'] self.assertEqual( [str(item) for item in bf[1:]], [str(bf[1]), str(bf[2]), str(bf[3])], ) def test_boundfield_invalid_index(self): class TestForm(Form): name = ChoiceField(choices=[]) field = TestForm()['name'] msg = 'BoundField indices must be integers or slices, not str.' with self.assertRaisesMessage(TypeError, msg): field['foo'] def test_boundfield_bool(self): """BoundField without any choices (subwidgets) evaluates to True.""" class TestForm(Form): name = ChoiceField(choices=[]) self.assertIs(bool(TestForm()['name']), True) def test_forms_with_multiple_choice(self): # MultipleChoiceField is a special case, as its data is required to be a list: class SongForm(Form): name = CharField() composers = MultipleChoiceField() f = SongForm(auto_id=False) self.assertHTMLEqual(str(f['composers']), """<select multiple name="composers" required> </select>""") class SongForm(Form): name = CharField() composers = MultipleChoiceField(choices=[('J', 'John Lennon'), ('P', 'Paul McCartney')]) f = SongForm(auto_id=False) self.assertHTMLEqual(str(f['composers']), """<select multiple name="composers" required> <option value="J">John Lennon</option> <option value="P">Paul McCartney</option> </select>""") f = SongForm({'name': 'Yesterday', 'composers': ['P']}, auto_id=False) self.assertHTMLEqual(str(f['name']), '<input type="text" name="name" value="Yesterday" required>') self.assertHTMLEqual(str(f['composers']), """<select multiple name="composers" required> <option value="J">John Lennon</option> <option value="P" selected>Paul McCartney</option> </select>""") def test_form_with_disabled_fields(self): class PersonForm(Form): name = CharField() birthday = DateField(disabled=True) class PersonFormFieldInitial(Form): name = CharField() birthday = DateField(disabled=True, initial=datetime.date(1974, 8, 16)) # Disabled fields are generally not transmitted by user agents. # The value from the form's initial data is used. 
f1 = PersonForm({'name': 'John Doe'}, initial={'birthday': datetime.date(1974, 8, 16)}) f2 = PersonFormFieldInitial({'name': 'John Doe'}) for form in (f1, f2): self.assertTrue(form.is_valid()) self.assertEqual( form.cleaned_data, {'birthday': datetime.date(1974, 8, 16), 'name': 'John Doe'} ) # Values provided in the form's data are ignored. data = {'name': 'John Doe', 'birthday': '1984-11-10'} f1 = PersonForm(data, initial={'birthday': datetime.date(1974, 8, 16)}) f2 = PersonFormFieldInitial(data) for form in (f1, f2): self.assertTrue(form.is_valid()) self.assertEqual( form.cleaned_data, {'birthday': datetime.date(1974, 8, 16), 'name': 'John Doe'} ) # Initial data remains present on invalid forms. data = {} f1 = PersonForm(data, initial={'birthday': datetime.date(1974, 8, 16)}) f2 = PersonFormFieldInitial(data) for form in (f1, f2): self.assertFalse(form.is_valid()) self.assertEqual(form['birthday'].value(), datetime.date(1974, 8, 16)) def test_hidden_data(self): class SongForm(Form): name = CharField() composers = MultipleChoiceField(choices=[('J', 'John Lennon'), ('P', 'Paul McCartney')]) # MultipleChoiceField rendered as_hidden() is a special case. Because it can # have multiple values, its as_hidden() renders multiple <input type="hidden"> # tags. f = SongForm({'name': 'Yesterday', 'composers': ['P']}, auto_id=False) self.assertHTMLEqual(f['composers'].as_hidden(), '<input type="hidden" name="composers" value="P">') f = SongForm({'name': 'From Me To You', 'composers': ['P', 'J']}, auto_id=False) self.assertHTMLEqual(f['composers'].as_hidden(), """<input type="hidden" name="composers" value="P"> <input type="hidden" name="composers" value="J">""") # DateTimeField rendered as_hidden() is special too class MessageForm(Form): when = SplitDateTimeField() f = MessageForm({'when_0': '1992-01-01', 'when_1': '01:01'}) self.assertTrue(f.is_valid()) self.assertHTMLEqual( str(f['when']), '<input type="text" name="when_0" value="1992-01-01" id="id_when_0" required>' '<input type="text" name="when_1" value="01:01" id="id_when_1" required>' ) self.assertHTMLEqual( f['when'].as_hidden(), '<input type="hidden" name="when_0" value="1992-01-01" id="id_when_0">' '<input type="hidden" name="when_1" value="01:01" id="id_when_1">' ) def test_multiple_choice_checkbox(self): # MultipleChoiceField can also be used with the CheckboxSelectMultiple widget. 
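        # Each choice then renders as an <li> containing a checkbox; the checked
        # state follows the list of values submitted for the field.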
class SongForm(Form): name = CharField() composers = MultipleChoiceField( choices=[('J', 'John Lennon'), ('P', 'Paul McCartney')], widget=CheckboxSelectMultiple, ) f = SongForm(auto_id=False) self.assertHTMLEqual(str(f['composers']), """<ul> <li><label><input type="checkbox" name="composers" value="J"> John Lennon</label></li> <li><label><input type="checkbox" name="composers" value="P"> Paul McCartney</label></li> </ul>""") f = SongForm({'composers': ['J']}, auto_id=False) self.assertHTMLEqual(str(f['composers']), """<ul> <li><label><input checked type="checkbox" name="composers" value="J"> John Lennon</label></li> <li><label><input type="checkbox" name="composers" value="P"> Paul McCartney</label></li> </ul>""") f = SongForm({'composers': ['J', 'P']}, auto_id=False) self.assertHTMLEqual(str(f['composers']), """<ul> <li><label><input checked type="checkbox" name="composers" value="J"> John Lennon</label></li> <li><label><input checked type="checkbox" name="composers" value="P"> Paul McCartney</label></li> </ul>""") # Test iterating on individual checkboxes in a template t = Template('{% for checkbox in form.composers %}<div class="mycheckbox">{{ checkbox }}</div>{% endfor %}') self.assertHTMLEqual(t.render(Context({'form': f})), """<div class="mycheckbox"><label> <input checked name="composers" type="checkbox" value="J"> John Lennon</label></div> <div class="mycheckbox"><label> <input checked name="composers" type="checkbox" value="P"> Paul McCartney</label></div>""") def test_checkbox_auto_id(self): # Regarding auto_id, CheckboxSelectMultiple is a special case. Each checkbox # gets a distinct ID, formed by appending an underscore plus the checkbox's # zero-based index. class SongForm(Form): name = CharField() composers = MultipleChoiceField( choices=[('J', 'John Lennon'), ('P', 'Paul McCartney')], widget=CheckboxSelectMultiple, ) f = SongForm(auto_id='%s_id') self.assertHTMLEqual( str(f['composers']), """<ul id="composers_id"> <li><label for="composers_id_0"> <input type="checkbox" name="composers" value="J" id="composers_id_0"> John Lennon</label></li> <li><label for="composers_id_1"> <input type="checkbox" name="composers" value="P" id="composers_id_1"> Paul McCartney</label></li> </ul>""" ) def test_multiple_choice_list_data(self): # Data for a MultipleChoiceField should be a list. QueryDict and # MultiValueDict conveniently work with this. class SongForm(Form): name = CharField() composers = MultipleChoiceField( choices=[('J', 'John Lennon'), ('P', 'Paul McCartney')], widget=CheckboxSelectMultiple, ) data = {'name': 'Yesterday', 'composers': ['J', 'P']} f = SongForm(data) self.assertEqual(f.errors, {}) data = QueryDict('name=Yesterday&composers=J&composers=P') f = SongForm(data) self.assertEqual(f.errors, {}) data = MultiValueDict({'name': ['Yesterday'], 'composers': ['J', 'P']}) f = SongForm(data) self.assertEqual(f.errors, {}) # SelectMultiple uses ducktyping so that MultiValueDictLike.getlist() # is called. f = SongForm(MultiValueDictLike({'name': 'Yesterday', 'composers': 'J'})) self.assertEqual(f.errors, {}) self.assertEqual(f.cleaned_data['composers'], ['J']) def test_multiple_hidden(self): class SongForm(Form): name = CharField() composers = MultipleChoiceField( choices=[('J', 'John Lennon'), ('P', 'Paul McCartney')], widget=CheckboxSelectMultiple, ) # The MultipleHiddenInput widget renders multiple values as hidden fields. 
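        # One <input type="hidden"> is emitted per value in the list.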
class SongFormHidden(Form): name = CharField() composers = MultipleChoiceField( choices=[('J', 'John Lennon'), ('P', 'Paul McCartney')], widget=MultipleHiddenInput, ) f = SongFormHidden(MultiValueDict({'name': ['Yesterday'], 'composers': ['J', 'P']}), auto_id=False) self.assertHTMLEqual( f.as_ul(), """<li>Name: <input type="text" name="name" value="Yesterday" required> <input type="hidden" name="composers" value="J"> <input type="hidden" name="composers" value="P"></li>""" ) # When using CheckboxSelectMultiple, the framework expects a list of input and # returns a list of input. f = SongForm({'name': 'Yesterday'}, auto_id=False) self.assertEqual(f.errors['composers'], ['This field is required.']) f = SongForm({'name': 'Yesterday', 'composers': ['J']}, auto_id=False) self.assertEqual(f.errors, {}) self.assertEqual(f.cleaned_data['composers'], ['J']) self.assertEqual(f.cleaned_data['name'], 'Yesterday') f = SongForm({'name': 'Yesterday', 'composers': ['J', 'P']}, auto_id=False) self.assertEqual(f.errors, {}) self.assertEqual(f.cleaned_data['composers'], ['J', 'P']) self.assertEqual(f.cleaned_data['name'], 'Yesterday') # MultipleHiddenInput uses ducktyping so that # MultiValueDictLike.getlist() is called. f = SongForm(MultiValueDictLike({'name': 'Yesterday', 'composers': 'J'})) self.assertEqual(f.errors, {}) self.assertEqual(f.cleaned_data['composers'], ['J']) def test_escaping(self): # Validation errors are HTML-escaped when output as HTML. class EscapingForm(Form): special_name = CharField(label="<em>Special</em> Field") special_safe_name = CharField(label=mark_safe("<em>Special</em> Field")) def clean_special_name(self): raise ValidationError("Something's wrong with '%s'" % self.cleaned_data['special_name']) def clean_special_safe_name(self): raise ValidationError( mark_safe("'<b>%s</b>' is a safe string" % self.cleaned_data['special_safe_name']) ) f = EscapingForm({ 'special_name': "Nothing to escape", 'special_safe_name': "Nothing to escape", }, auto_id=False) self.assertHTMLEqual( f.as_table(), """<tr><th>&lt;em&gt;Special&lt;/em&gt; Field:</th><td> <ul class="errorlist"><li>Something&#x27;s wrong with &#x27;Nothing to escape&#x27;</li></ul> <input type="text" name="special_name" value="Nothing to escape" required></td></tr> <tr><th><em>Special</em> Field:</th><td> <ul class="errorlist"><li>'<b>Nothing to escape</b>' is a safe string</li></ul> <input type="text" name="special_safe_name" value="Nothing to escape" required></td></tr>""" ) f = EscapingForm({ 'special_name': "Should escape < & > and <script>alert('xss')</script>", 'special_safe_name': "<i>Do not escape</i>" }, auto_id=False) self.assertHTMLEqual( f.as_table(), """<tr><th>&lt;em&gt;Special&lt;/em&gt; Field:</th><td> <ul class="errorlist"><li>Something&#x27;s wrong with &#x27;Should escape &lt; &amp; &gt; and &lt;script&gt;alert(&#x27;xss&#x27;)&lt;/script&gt;&#x27;</li></ul> <input type="text" name="special_name" value="Should escape &lt; &amp; &gt; and &lt;script&gt;alert(&#x27;xss&#x27;)&lt;/script&gt;" required></td></tr> <tr><th><em>Special</em> Field:</th><td> <ul class="errorlist"><li>'<b><i>Do not escape</i></b>' is a safe string</li></ul> <input type="text" name="special_safe_name" value="&lt;i&gt;Do not escape&lt;/i&gt;" required></td></tr>""" ) def test_validating_multiple_fields(self): # There are a couple of ways to do multiple-field validation. If you want the # validation message to be associated with a particular field, implement the # clean_XXX() method on the Form, where XXX is the field name. 
As in # Field.clean(), the clean_XXX() method should return the cleaned value. In the # clean_XXX() method, you have access to self.cleaned_data, which is a dictionary # of all the data that has been cleaned *so far*, in order by the fields, # including the current field (e.g., the field XXX if you're in clean_XXX()). class UserRegistration(Form): username = CharField(max_length=10) password1 = CharField(widget=PasswordInput) password2 = CharField(widget=PasswordInput) def clean_password2(self): if (self.cleaned_data.get('password1') and self.cleaned_data.get('password2') and self.cleaned_data['password1'] != self.cleaned_data['password2']): raise ValidationError('Please make sure your passwords match.') return self.cleaned_data['password2'] f = UserRegistration(auto_id=False) self.assertEqual(f.errors, {}) f = UserRegistration({}, auto_id=False) self.assertEqual(f.errors['username'], ['This field is required.']) self.assertEqual(f.errors['password1'], ['This field is required.']) self.assertEqual(f.errors['password2'], ['This field is required.']) f = UserRegistration({'username': 'adrian', 'password1': 'foo', 'password2': 'bar'}, auto_id=False) self.assertEqual(f.errors['password2'], ['Please make sure your passwords match.']) f = UserRegistration({'username': 'adrian', 'password1': 'foo', 'password2': 'foo'}, auto_id=False) self.assertEqual(f.errors, {}) self.assertEqual(f.cleaned_data['username'], 'adrian') self.assertEqual(f.cleaned_data['password1'], 'foo') self.assertEqual(f.cleaned_data['password2'], 'foo') # Another way of doing multiple-field validation is by implementing the # Form's clean() method. Usually ValidationError raised by that method # will not be associated with a particular field and will have a # special-case association with the field named '__all__'. It's # possible to associate the errors to particular field with the # Form.add_error() method or by passing a dictionary that maps each # field to one or more errors. # # Note that in Form.clean(), you have access to self.cleaned_data, a # dictionary of all the fields/values that have *not* raised a # ValidationError. Also note Form.clean() is required to return a # dictionary of all clean data. class UserRegistration(Form): username = CharField(max_length=10) password1 = CharField(widget=PasswordInput) password2 = CharField(widget=PasswordInput) def clean(self): # Test raising a ValidationError as NON_FIELD_ERRORS. if (self.cleaned_data.get('password1') and self.cleaned_data.get('password2') and self.cleaned_data['password1'] != self.cleaned_data['password2']): raise ValidationError('Please make sure your passwords match.') # Test raising ValidationError that targets multiple fields. errors = {} if self.cleaned_data.get('password1') == 'FORBIDDEN_VALUE': errors['password1'] = 'Forbidden value.' 
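                # Either a single message or a list of messages is accepted per
                # field key; the next check deliberately uses the list form.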
if self.cleaned_data.get('password2') == 'FORBIDDEN_VALUE': errors['password2'] = ['Forbidden value.'] if errors: raise ValidationError(errors) # Test Form.add_error() if self.cleaned_data.get('password1') == 'FORBIDDEN_VALUE2': self.add_error(None, 'Non-field error 1.') self.add_error('password1', 'Forbidden value 2.') if self.cleaned_data.get('password2') == 'FORBIDDEN_VALUE2': self.add_error('password2', 'Forbidden value 2.') raise ValidationError('Non-field error 2.') return self.cleaned_data f = UserRegistration(auto_id=False) self.assertEqual(f.errors, {}) f = UserRegistration({}, auto_id=False) self.assertHTMLEqual( f.as_table(), """<tr><th>Username:</th><td> <ul class="errorlist"><li>This field is required.</li></ul> <input type="text" name="username" maxlength="10" required></td></tr> <tr><th>Password1:</th><td><ul class="errorlist"><li>This field is required.</li></ul> <input type="password" name="password1" required></td></tr> <tr><th>Password2:</th><td><ul class="errorlist"><li>This field is required.</li></ul> <input type="password" name="password2" required></td></tr>""" ) self.assertEqual(f.errors['username'], ['This field is required.']) self.assertEqual(f.errors['password1'], ['This field is required.']) self.assertEqual(f.errors['password2'], ['This field is required.']) f = UserRegistration({'username': 'adrian', 'password1': 'foo', 'password2': 'bar'}, auto_id=False) self.assertEqual(f.errors['__all__'], ['Please make sure your passwords match.']) self.assertHTMLEqual( f.as_table(), """<tr><td colspan="2"> <ul class="errorlist nonfield"><li>Please make sure your passwords match.</li></ul></td></tr> <tr><th>Username:</th><td><input type="text" name="username" value="adrian" maxlength="10" required></td></tr> <tr><th>Password1:</th><td><input type="password" name="password1" required></td></tr> <tr><th>Password2:</th><td><input type="password" name="password2" required></td></tr>""" ) self.assertHTMLEqual( f.as_ul(), """<li><ul class="errorlist nonfield"> <li>Please make sure your passwords match.</li></ul></li> <li>Username: <input type="text" name="username" value="adrian" maxlength="10" required></li> <li>Password1: <input type="password" name="password1" required></li> <li>Password2: <input type="password" name="password2" required></li>""" ) f = UserRegistration({'username': 'adrian', 'password1': 'foo', 'password2': 'foo'}, auto_id=False) self.assertEqual(f.errors, {}) self.assertEqual(f.cleaned_data['username'], 'adrian') self.assertEqual(f.cleaned_data['password1'], 'foo') self.assertEqual(f.cleaned_data['password2'], 'foo') f = UserRegistration({ 'username': 'adrian', 'password1': 'FORBIDDEN_VALUE', 'password2': 'FORBIDDEN_VALUE', }, auto_id=False) self.assertEqual(f.errors['password1'], ['Forbidden value.']) self.assertEqual(f.errors['password2'], ['Forbidden value.']) f = UserRegistration({ 'username': 'adrian', 'password1': 'FORBIDDEN_VALUE2', 'password2': 'FORBIDDEN_VALUE2', }, auto_id=False) self.assertEqual(f.errors['__all__'], ['Non-field error 1.', 'Non-field error 2.']) self.assertEqual(f.errors['password1'], ['Forbidden value 2.']) self.assertEqual(f.errors['password2'], ['Forbidden value 2.']) with self.assertRaisesMessage(ValueError, "has no field named"): f.add_error('missing_field', 'Some error.') def test_update_error_dict(self): class CodeForm(Form): code = CharField(max_length=10) def clean(self): try: raise ValidationError({'code': [ValidationError('Code error 1.')]}) except ValidationError as e: self._errors = e.update_error_dict(self._errors) 
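                # update_error_dict() merges the new exception's messages into
                # the existing error dict, so repeated calls accumulate errors
                # under the same key.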
try: raise ValidationError({'code': [ValidationError('Code error 2.')]}) except ValidationError as e: self._errors = e.update_error_dict(self._errors) try: raise ValidationError({'code': forms.ErrorList(['Code error 3.'])}) except ValidationError as e: self._errors = e.update_error_dict(self._errors) try: raise ValidationError('Non-field error 1.') except ValidationError as e: self._errors = e.update_error_dict(self._errors) try: raise ValidationError([ValidationError('Non-field error 2.')]) except ValidationError as e: self._errors = e.update_error_dict(self._errors) # The newly added list of errors is an instance of ErrorList. for field, error_list in self._errors.items(): if not isinstance(error_list, self.error_class): self._errors[field] = self.error_class(error_list) form = CodeForm({'code': 'hello'}) # Trigger validation. self.assertFalse(form.is_valid()) # update_error_dict didn't lose track of the ErrorDict type. self.assertIsInstance(form._errors, forms.ErrorDict) self.assertEqual(dict(form.errors), { 'code': ['Code error 1.', 'Code error 2.', 'Code error 3.'], NON_FIELD_ERRORS: ['Non-field error 1.', 'Non-field error 2.'], }) def test_has_error(self): class UserRegistration(Form): username = CharField(max_length=10) password1 = CharField(widget=PasswordInput, min_length=5) password2 = CharField(widget=PasswordInput) def clean(self): if (self.cleaned_data.get('password1') and self.cleaned_data.get('password2') and self.cleaned_data['password1'] != self.cleaned_data['password2']): raise ValidationError( 'Please make sure your passwords match.', code='password_mismatch', ) f = UserRegistration(data={}) self.assertTrue(f.has_error('password1')) self.assertTrue(f.has_error('password1', 'required')) self.assertFalse(f.has_error('password1', 'anything')) f = UserRegistration(data={'password1': 'Hi', 'password2': 'Hi'}) self.assertTrue(f.has_error('password1')) self.assertTrue(f.has_error('password1', 'min_length')) self.assertFalse(f.has_error('password1', 'anything')) self.assertFalse(f.has_error('password2')) self.assertFalse(f.has_error('password2', 'anything')) f = UserRegistration(data={'password1': 'Bonjour', 'password2': 'Hello'}) self.assertFalse(f.has_error('password1')) self.assertFalse(f.has_error('password1', 'required')) self.assertTrue(f.has_error(NON_FIELD_ERRORS)) self.assertTrue(f.has_error(NON_FIELD_ERRORS, 'password_mismatch')) self.assertFalse(f.has_error(NON_FIELD_ERRORS, 'anything')) def test_dynamic_construction(self): # It's possible to construct a Form dynamically by adding to the self.fields # dictionary in __init__(). Don't forget to call Form.__init__() within the # subclass' __init__(). class Person(Form): first_name = CharField() last_name = CharField() def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.fields['birthday'] = DateField() p = Person(auto_id=False) self.assertHTMLEqual( p.as_table(), """<tr><th>First name:</th><td><input type="text" name="first_name" required></td></tr> <tr><th>Last name:</th><td><input type="text" name="last_name" required></td></tr> <tr><th>Birthday:</th><td><input type="text" name="birthday" required></td></tr>""" ) # Instances of a dynamic Form do not persist fields from one Form instance to # the next. 
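        # (Form.__init__() deep-copies the class-level field definitions into
        # self.fields, so per-instance additions never leak across instances.)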
class MyForm(Form): def __init__(self, data=None, auto_id=False, field_list=[]): Form.__init__(self, data, auto_id=auto_id) for field in field_list: self.fields[field[0]] = field[1] field_list = [('field1', CharField()), ('field2', CharField())] my_form = MyForm(field_list=field_list) self.assertHTMLEqual( my_form.as_table(), """<tr><th>Field1:</th><td><input type="text" name="field1" required></td></tr> <tr><th>Field2:</th><td><input type="text" name="field2" required></td></tr>""" ) field_list = [('field3', CharField()), ('field4', CharField())] my_form = MyForm(field_list=field_list) self.assertHTMLEqual( my_form.as_table(), """<tr><th>Field3:</th><td><input type="text" name="field3" required></td></tr> <tr><th>Field4:</th><td><input type="text" name="field4" required></td></tr>""" ) class MyForm(Form): default_field_1 = CharField() default_field_2 = CharField() def __init__(self, data=None, auto_id=False, field_list=[]): Form.__init__(self, data, auto_id=auto_id) for field in field_list: self.fields[field[0]] = field[1] field_list = [('field1', CharField()), ('field2', CharField())] my_form = MyForm(field_list=field_list) self.assertHTMLEqual( my_form.as_table(), """<tr><th>Default field 1:</th><td><input type="text" name="default_field_1" required></td></tr> <tr><th>Default field 2:</th><td><input type="text" name="default_field_2" required></td></tr> <tr><th>Field1:</th><td><input type="text" name="field1" required></td></tr> <tr><th>Field2:</th><td><input type="text" name="field2" required></td></tr>""" ) field_list = [('field3', CharField()), ('field4', CharField())] my_form = MyForm(field_list=field_list) self.assertHTMLEqual( my_form.as_table(), """<tr><th>Default field 1:</th><td><input type="text" name="default_field_1" required></td></tr> <tr><th>Default field 2:</th><td><input type="text" name="default_field_2" required></td></tr> <tr><th>Field3:</th><td><input type="text" name="field3" required></td></tr> <tr><th>Field4:</th><td><input type="text" name="field4" required></td></tr>""" ) # Similarly, changes to field attributes do not persist from one Form instance # to the next. 
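        # Illustrative sketch (not one of the original assertions), using the
        # Person form defined just below: because each instance works on its own
        # copy of the fields, mutating one instance leaves another untouched:
        #
        #     f1, f2 = Person(), Person()
        #     f1.fields['first_name'].required = True
        #     assert f2.fields['first_name'].required is False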
        class Person(Form):
            first_name = CharField(required=False)
            last_name = CharField(required=False)

            def __init__(self, names_required=False, *args, **kwargs):
                super().__init__(*args, **kwargs)
                if names_required:
                    self.fields['first_name'].required = True
                    self.fields['first_name'].widget.attrs['class'] = 'required'
                    self.fields['last_name'].required = True
                    self.fields['last_name'].widget.attrs['class'] = 'required'

        f = Person(names_required=False)
        self.assertEqual(
            (f['first_name'].field.required, f['last_name'].field.required),
            (False, False),
        )
        self.assertEqual(
            (f['first_name'].field.widget.attrs, f['last_name'].field.widget.attrs),
            ({}, {}),
        )
        f = Person(names_required=True)
        self.assertEqual(
            (f['first_name'].field.required, f['last_name'].field.required),
            (True, True),
        )
        self.assertEqual(
            (f['first_name'].field.widget.attrs, f['last_name'].field.widget.attrs),
            ({'class': 'required'}, {'class': 'required'}),
        )
        f = Person(names_required=False)
        self.assertEqual(
            (f['first_name'].field.required, f['last_name'].field.required),
            (False, False),
        )
        self.assertEqual(
            (f['first_name'].field.widget.attrs, f['last_name'].field.widget.attrs),
            ({}, {}),
        )

        class Person(Form):
            first_name = CharField(max_length=30)
            last_name = CharField(max_length=30)

            def __init__(self, name_max_length=None, *args, **kwargs):
                super().__init__(*args, **kwargs)
                if name_max_length:
                    self.fields['first_name'].max_length = name_max_length
                    self.fields['last_name'].max_length = name_max_length

        f = Person(name_max_length=None)
        self.assertEqual(
            (f['first_name'].field.max_length, f['last_name'].field.max_length),
            (30, 30),
        )
        f = Person(name_max_length=20)
        self.assertEqual(
            (f['first_name'].field.max_length, f['last_name'].field.max_length),
            (20, 20),
        )
        f = Person(name_max_length=None)
        self.assertEqual(
            (f['first_name'].field.max_length, f['last_name'].field.max_length),
            (30, 30),
        )

        # Similarly, choices do not persist from one Form instance to the next.
        # Refs #15127.
        class Person(Form):
            first_name = CharField(required=False)
            last_name = CharField(required=False)
            gender = ChoiceField(choices=(('f', 'Female'), ('m', 'Male')))

            def __init__(self, allow_unspec_gender=False, *args, **kwargs):
                super().__init__(*args, **kwargs)
                if allow_unspec_gender:
                    self.fields['gender'].choices += (('u', 'Unspecified'),)

        f = Person()
        self.assertEqual(f['gender'].field.choices, [('f', 'Female'), ('m', 'Male')])
        f = Person(allow_unspec_gender=True)
        self.assertEqual(f['gender'].field.choices, [('f', 'Female'), ('m', 'Male'), ('u', 'Unspecified')])
        f = Person()
        self.assertEqual(f['gender'].field.choices, [('f', 'Female'), ('m', 'Male')])

    def test_validators_independence(self):
        """
        The list of form field validators can be modified without polluting
        other forms.
        """
        class MyForm(Form):
            myfield = CharField(max_length=25)

        f1 = MyForm()
        f2 = MyForm()

        f1.fields['myfield'].validators[0] = MaxValueValidator(12)
        self.assertNotEqual(f1.fields['myfield'].validators[0], f2.fields['myfield'].validators[0])

    def test_hidden_widget(self):
        # HiddenInput widgets are displayed differently in the as_table(), as_ul()
        # and as_p() output of a Form -- their verbose names are not displayed, and a
        # separate row is not displayed. They're displayed in the last row of the
        # form, directly after that row's form element.
class Person(Form): first_name = CharField() last_name = CharField() hidden_text = CharField(widget=HiddenInput) birthday = DateField() p = Person(auto_id=False) self.assertHTMLEqual( p.as_table(), """<tr><th>First name:</th><td><input type="text" name="first_name" required></td></tr> <tr><th>Last name:</th><td><input type="text" name="last_name" required></td></tr> <tr><th>Birthday:</th> <td><input type="text" name="birthday" required><input type="hidden" name="hidden_text"></td></tr>""" ) self.assertHTMLEqual( p.as_ul(), """<li>First name: <input type="text" name="first_name" required></li> <li>Last name: <input type="text" name="last_name" required></li> <li>Birthday: <input type="text" name="birthday" required><input type="hidden" name="hidden_text"></li>""" ) self.assertHTMLEqual( p.as_p(), """<p>First name: <input type="text" name="first_name" required></p> <p>Last name: <input type="text" name="last_name" required></p> <p>Birthday: <input type="text" name="birthday" required><input type="hidden" name="hidden_text"></p>""" ) # With auto_id set, a HiddenInput still gets an ID, but it doesn't get a label. p = Person(auto_id='id_%s') self.assertHTMLEqual( p.as_table(), """<tr><th><label for="id_first_name">First name:</label></th><td> <input type="text" name="first_name" id="id_first_name" required></td></tr> <tr><th><label for="id_last_name">Last name:</label></th><td> <input type="text" name="last_name" id="id_last_name" required></td></tr> <tr><th><label for="id_birthday">Birthday:</label></th><td> <input type="text" name="birthday" id="id_birthday" required> <input type="hidden" name="hidden_text" id="id_hidden_text"></td></tr>""" ) self.assertHTMLEqual( p.as_ul(), """<li><label for="id_first_name">First name:</label> <input type="text" name="first_name" id="id_first_name" required></li> <li><label for="id_last_name">Last name:</label> <input type="text" name="last_name" id="id_last_name" required></li> <li><label for="id_birthday">Birthday:</label> <input type="text" name="birthday" id="id_birthday" required> <input type="hidden" name="hidden_text" id="id_hidden_text"></li>""" ) self.assertHTMLEqual( p.as_p(), """<p><label for="id_first_name">First name:</label> <input type="text" name="first_name" id="id_first_name" required></p> <p><label for="id_last_name">Last name:</label> <input type="text" name="last_name" id="id_last_name" required></p> <p><label for="id_birthday">Birthday:</label> <input type="text" name="birthday" id="id_birthday" required> <input type="hidden" name="hidden_text" id="id_hidden_text"></p>""" ) # If a field with a HiddenInput has errors, the as_table() and as_ul() output # will include the error message(s) with the text "(Hidden field [fieldname]) " # prepended. This message is displayed at the top of the output, regardless of # its field's order in the form. 
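        # Internally the message is added to the form's non-field errors, which
        # is why it renders in the top error block rather than next to a row.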
p = Person({'first_name': 'John', 'last_name': 'Lennon', 'birthday': '1940-10-9'}, auto_id=False) self.assertHTMLEqual( p.as_table(), """<tr><td colspan="2"> <ul class="errorlist nonfield"><li>(Hidden field hidden_text) This field is required.</li></ul></td></tr> <tr><th>First name:</th><td><input type="text" name="first_name" value="John" required></td></tr> <tr><th>Last name:</th><td><input type="text" name="last_name" value="Lennon" required></td></tr> <tr><th>Birthday:</th><td><input type="text" name="birthday" value="1940-10-9" required> <input type="hidden" name="hidden_text"></td></tr>""" ) self.assertHTMLEqual( p.as_ul(), """<li><ul class="errorlist nonfield"><li>(Hidden field hidden_text) This field is required.</li></ul></li> <li>First name: <input type="text" name="first_name" value="John" required></li> <li>Last name: <input type="text" name="last_name" value="Lennon" required></li> <li>Birthday: <input type="text" name="birthday" value="1940-10-9" required> <input type="hidden" name="hidden_text"></li>""" ) self.assertHTMLEqual( p.as_p(), """<ul class="errorlist nonfield"><li>(Hidden field hidden_text) This field is required.</li></ul> <p>First name: <input type="text" name="first_name" value="John" required></p> <p>Last name: <input type="text" name="last_name" value="Lennon" required></p> <p>Birthday: <input type="text" name="birthday" value="1940-10-9" required> <input type="hidden" name="hidden_text"></p>""" ) # A corner case: It's possible for a form to have only HiddenInputs. class TestForm(Form): foo = CharField(widget=HiddenInput) bar = CharField(widget=HiddenInput) p = TestForm(auto_id=False) self.assertHTMLEqual(p.as_table(), '<input type="hidden" name="foo"><input type="hidden" name="bar">') self.assertHTMLEqual(p.as_ul(), '<input type="hidden" name="foo"><input type="hidden" name="bar">') self.assertHTMLEqual(p.as_p(), '<input type="hidden" name="foo"><input type="hidden" name="bar">') def test_field_order(self): # A Form's fields are displayed in the same order in which they were defined. 
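        # (Declaration order is preserved by the form metaclass, which collects
        # the fields as the class body executes.)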
class TestForm(Form): field1 = CharField() field2 = CharField() field3 = CharField() field4 = CharField() field5 = CharField() field6 = CharField() field7 = CharField() field8 = CharField() field9 = CharField() field10 = CharField() field11 = CharField() field12 = CharField() field13 = CharField() field14 = CharField() p = TestForm(auto_id=False) self.assertHTMLEqual(p.as_table(), """<tr><th>Field1:</th><td><input type="text" name="field1" required></td></tr> <tr><th>Field2:</th><td><input type="text" name="field2" required></td></tr> <tr><th>Field3:</th><td><input type="text" name="field3" required></td></tr> <tr><th>Field4:</th><td><input type="text" name="field4" required></td></tr> <tr><th>Field5:</th><td><input type="text" name="field5" required></td></tr> <tr><th>Field6:</th><td><input type="text" name="field6" required></td></tr> <tr><th>Field7:</th><td><input type="text" name="field7" required></td></tr> <tr><th>Field8:</th><td><input type="text" name="field8" required></td></tr> <tr><th>Field9:</th><td><input type="text" name="field9" required></td></tr> <tr><th>Field10:</th><td><input type="text" name="field10" required></td></tr> <tr><th>Field11:</th><td><input type="text" name="field11" required></td></tr> <tr><th>Field12:</th><td><input type="text" name="field12" required></td></tr> <tr><th>Field13:</th><td><input type="text" name="field13" required></td></tr> <tr><th>Field14:</th><td><input type="text" name="field14" required></td></tr>""") def test_explicit_field_order(self): class TestFormParent(Form): field1 = CharField() field2 = CharField() field4 = CharField() field5 = CharField() field6 = CharField() field_order = ['field6', 'field5', 'field4', 'field2', 'field1'] class TestForm(TestFormParent): field3 = CharField() field_order = ['field2', 'field4', 'field3', 'field5', 'field6'] class TestFormRemove(TestForm): field1 = None class TestFormMissing(TestForm): field_order = ['field2', 'field4', 'field3', 'field5', 'field6', 'field1'] field1 = None class TestFormInit(TestFormParent): field3 = CharField() field_order = None def __init__(self, **kwargs): super().__init__(**kwargs) self.order_fields(field_order=TestForm.field_order) p = TestFormParent() self.assertEqual(list(p.fields), TestFormParent.field_order) p = TestFormRemove() self.assertEqual(list(p.fields), TestForm.field_order) p = TestFormMissing() self.assertEqual(list(p.fields), TestForm.field_order) p = TestForm() self.assertEqual(list(p.fields), TestFormMissing.field_order) p = TestFormInit() order = [*TestForm.field_order, 'field1'] self.assertEqual(list(p.fields), order) TestForm.field_order = ['unknown'] p = TestForm() self.assertEqual(list(p.fields), ['field1', 'field2', 'field4', 'field5', 'field6', 'field3']) def test_form_html_attributes(self): # Some Field classes have an effect on the HTML attributes of their associated # Widget. If you set max_length in a CharField and its associated widget is # either a TextInput or PasswordInput, then the widget's rendered HTML will # include the "maxlength" attribute. 
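        # The attribute is contributed by CharField.widget_attrs(), which the
        # widget merges into its rendered attrs.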
class UserRegistration(Form): username = CharField(max_length=10) # uses TextInput by default password = CharField(max_length=10, widget=PasswordInput) realname = CharField(max_length=10, widget=TextInput) # redundantly define widget, just to test address = CharField() # no max_length defined here p = UserRegistration(auto_id=False) self.assertHTMLEqual( p.as_ul(), """<li>Username: <input type="text" name="username" maxlength="10" required></li> <li>Password: <input type="password" name="password" maxlength="10" required></li> <li>Realname: <input type="text" name="realname" maxlength="10" required></li> <li>Address: <input type="text" name="address" required></li>""" ) # If you specify a custom "attrs" that includes the "maxlength" attribute, # the Field's max_length attribute will override whatever "maxlength" you specify # in "attrs". class UserRegistration(Form): username = CharField(max_length=10, widget=TextInput(attrs={'maxlength': 20})) password = CharField(max_length=10, widget=PasswordInput) p = UserRegistration(auto_id=False) self.assertHTMLEqual( p.as_ul(), """<li>Username: <input type="text" name="username" maxlength="10" required></li> <li>Password: <input type="password" name="password" maxlength="10" required></li>""" ) def test_specifying_labels(self): # You can specify the label for a field by using the 'label' argument to a Field # class. If you don't specify 'label', Django will use the field name with # underscores converted to spaces, and the initial letter capitalized. class UserRegistration(Form): username = CharField(max_length=10, label='Your username') password1 = CharField(widget=PasswordInput) password2 = CharField(widget=PasswordInput, label='Contraseña (de nuevo)') p = UserRegistration(auto_id=False) self.assertHTMLEqual( p.as_ul(), """<li>Your username: <input type="text" name="username" maxlength="10" required></li> <li>Password1: <input type="password" name="password1" required></li> <li>Contraseña (de nuevo): <input type="password" name="password2" required></li>""" ) # Labels for as_* methods will only end in a colon if they don't end in other # punctuation already. class Questions(Form): q1 = CharField(label='The first question') q2 = CharField(label='What is your name?') q3 = CharField(label='The answer to life is:') q4 = CharField(label='Answer this question!') q5 = CharField(label='The last question. Period.') self.assertHTMLEqual( Questions(auto_id=False).as_p(), """<p>The first question: <input type="text" name="q1" required></p> <p>What is your name? <input type="text" name="q2" required></p> <p>The answer to life is: <input type="text" name="q3" required></p> <p>Answer this question! <input type="text" name="q4" required></p> <p>The last question. Period. <input type="text" name="q5" required></p>""" ) self.assertHTMLEqual( Questions().as_p(), """<p><label for="id_q1">The first question:</label> <input type="text" name="q1" id="id_q1" required></p> <p><label for="id_q2">What is your name?</label> <input type="text" name="q2" id="id_q2" required></p> <p><label for="id_q3">The answer to life is:</label> <input type="text" name="q3" id="id_q3" required></p> <p><label for="id_q4">Answer this question!</label> <input type="text" name="q4" id="id_q4" required></p> <p><label for="id_q5">The last question. Period.</label> <input type="text" name="q5" id="id_q5" required></p>""" ) # If a label is set to the empty string for a field, that field won't get a label. 
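        # (label='' differs from label=None: None means "derive the label from
        # the field name", which a later example demonstrates.)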
class UserRegistration(Form): username = CharField(max_length=10, label='') password = CharField(widget=PasswordInput) p = UserRegistration(auto_id=False) self.assertHTMLEqual(p.as_ul(), """<li> <input type="text" name="username" maxlength="10" required></li> <li>Password: <input type="password" name="password" required></li>""") p = UserRegistration(auto_id='id_%s') self.assertHTMLEqual( p.as_ul(), """<li> <input id="id_username" type="text" name="username" maxlength="10" required></li> <li><label for="id_password">Password:</label> <input type="password" name="password" id="id_password" required></li>""" ) # If label is None, Django will auto-create the label from the field name. This # is default behavior. class UserRegistration(Form): username = CharField(max_length=10, label=None) password = CharField(widget=PasswordInput) p = UserRegistration(auto_id=False) self.assertHTMLEqual( p.as_ul(), """<li>Username: <input type="text" name="username" maxlength="10" required></li> <li>Password: <input type="password" name="password" required></li>""" ) p = UserRegistration(auto_id='id_%s') self.assertHTMLEqual( p.as_ul(), """<li><label for="id_username">Username:</label> <input id="id_username" type="text" name="username" maxlength="10" required></li> <li><label for="id_password">Password:</label> <input type="password" name="password" id="id_password" required></li>""" ) def test_label_suffix(self): # You can specify the 'label_suffix' argument to a Form class to modify the # punctuation symbol used at the end of a label. By default, the colon (:) is # used, and is only appended to the label if the label doesn't already end with a # punctuation symbol: ., !, ? or :. If you specify a different suffix, it will # be appended regardless of the last character of the label. class FavoriteForm(Form): color = CharField(label='Favorite color?') animal = CharField(label='Favorite animal') answer = CharField(label='Secret answer', label_suffix=' =') f = FavoriteForm(auto_id=False) self.assertHTMLEqual(f.as_ul(), """<li>Favorite color? <input type="text" name="color" required></li> <li>Favorite animal: <input type="text" name="animal" required></li> <li>Secret answer = <input type="text" name="answer" required></li>""") f = FavoriteForm(auto_id=False, label_suffix='?') self.assertHTMLEqual(f.as_ul(), """<li>Favorite color? <input type="text" name="color" required></li> <li>Favorite animal? <input type="text" name="animal" required></li> <li>Secret answer = <input type="text" name="answer" required></li>""") f = FavoriteForm(auto_id=False, label_suffix='') self.assertHTMLEqual(f.as_ul(), """<li>Favorite color? <input type="text" name="color" required></li> <li>Favorite animal <input type="text" name="animal" required></li> <li>Secret answer = <input type="text" name="answer" required></li>""") f = FavoriteForm(auto_id=False, label_suffix='\u2192') self.assertHTMLEqual( f.as_ul(), '<li>Favorite color? <input type="text" name="color" required></li>\n' '<li>Favorite animal\u2192 <input type="text" name="animal" required></li>\n' '<li>Secret answer = <input type="text" name="answer" required></li>' ) def test_initial_data(self): # You can specify initial data for a field by using the 'initial' argument to a # Field class. This initial data is displayed when a Form is rendered with *no* # data. It is not displayed when a Form is rendered with any data (including an # empty dictionary). Also, the initial value is *not* used if data for a # particular required field isn't provided. 
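        # (In other words, initial affects rendering only; validation always
        # sees exactly what was submitted.)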
class UserRegistration(Form): username = CharField(max_length=10, initial='django') password = CharField(widget=PasswordInput) # Here, we're not submitting any data, so the initial value will be displayed.) p = UserRegistration(auto_id=False) self.assertHTMLEqual( p.as_ul(), """<li>Username: <input type="text" name="username" value="django" maxlength="10" required></li> <li>Password: <input type="password" name="password" required></li>""" ) # Here, we're submitting data, so the initial value will *not* be displayed. p = UserRegistration({}, auto_id=False) self.assertHTMLEqual( p.as_ul(), """<li><ul class="errorlist"><li>This field is required.</li></ul> Username: <input type="text" name="username" maxlength="10" required></li> <li><ul class="errorlist"><li>This field is required.</li></ul> Password: <input type="password" name="password" required></li>""" ) p = UserRegistration({'username': ''}, auto_id=False) self.assertHTMLEqual( p.as_ul(), """<li><ul class="errorlist"><li>This field is required.</li></ul> Username: <input type="text" name="username" maxlength="10" required></li> <li><ul class="errorlist"><li>This field is required.</li></ul> Password: <input type="password" name="password" required></li>""" ) p = UserRegistration({'username': 'foo'}, auto_id=False) self.assertHTMLEqual( p.as_ul(), """<li>Username: <input type="text" name="username" value="foo" maxlength="10" required></li> <li><ul class="errorlist"><li>This field is required.</li></ul> Password: <input type="password" name="password" required></li>""" ) # An 'initial' value is *not* used as a fallback if data is not provided. In this # example, we don't provide a value for 'username', and the form raises a # validation error rather than using the initial value for 'username'. p = UserRegistration({'password': 'secret'}) self.assertEqual(p.errors['username'], ['This field is required.']) self.assertFalse(p.is_valid()) def test_dynamic_initial_data(self): # The previous technique dealt with "hard-coded" initial data, but it's also # possible to specify initial data after you've already created the Form class # (i.e., at runtime). Use the 'initial' parameter to the Form constructor. This # should be a dictionary containing initial values for one or more fields in the # form, keyed by field name. class UserRegistration(Form): username = CharField(max_length=10) password = CharField(widget=PasswordInput) # Here, we're not submitting any data, so the initial value will be displayed.) p = UserRegistration(initial={'username': 'django'}, auto_id=False) self.assertHTMLEqual( p.as_ul(), """<li>Username: <input type="text" name="username" value="django" maxlength="10" required></li> <li>Password: <input type="password" name="password" required></li>""" ) p = UserRegistration(initial={'username': 'stephane'}, auto_id=False) self.assertHTMLEqual( p.as_ul(), """<li>Username: <input type="text" name="username" value="stephane" maxlength="10" required></li> <li>Password: <input type="password" name="password" required></li>""" ) # The 'initial' parameter is meaningless if you pass data. 
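        # (BoundField.value() falls back to initial only when the form is
        # unbound or the field is disabled.)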
p = UserRegistration({}, initial={'username': 'django'}, auto_id=False) self.assertHTMLEqual( p.as_ul(), """<li><ul class="errorlist"><li>This field is required.</li></ul> Username: <input type="text" name="username" maxlength="10" required></li> <li><ul class="errorlist"><li>This field is required.</li></ul> Password: <input type="password" name="password" required></li>""" ) p = UserRegistration({'username': ''}, initial={'username': 'django'}, auto_id=False) self.assertHTMLEqual( p.as_ul(), """<li><ul class="errorlist"><li>This field is required.</li></ul> Username: <input type="text" name="username" maxlength="10" required></li> <li><ul class="errorlist"><li>This field is required.</li></ul> Password: <input type="password" name="password" required></li>""" ) p = UserRegistration({'username': 'foo'}, initial={'username': 'django'}, auto_id=False) self.assertHTMLEqual( p.as_ul(), """<li>Username: <input type="text" name="username" value="foo" maxlength="10" required></li> <li><ul class="errorlist"><li>This field is required.</li></ul> Password: <input type="password" name="password" required></li>""" ) # A dynamic 'initial' value is *not* used as a fallback if data is not provided. # In this example, we don't provide a value for 'username', and the form raises a # validation error rather than using the initial value for 'username'. p = UserRegistration({'password': 'secret'}, initial={'username': 'django'}) self.assertEqual(p.errors['username'], ['This field is required.']) self.assertFalse(p.is_valid()) # If a Form defines 'initial' *and* 'initial' is passed as a parameter to Form(), # then the latter will get precedence. class UserRegistration(Form): username = CharField(max_length=10, initial='django') password = CharField(widget=PasswordInput) p = UserRegistration(initial={'username': 'babik'}, auto_id=False) self.assertHTMLEqual( p.as_ul(), """<li>Username: <input type="text" name="username" value="babik" maxlength="10" required></li> <li>Password: <input type="password" name="password" required></li>""" ) def test_callable_initial_data(self): # The previous technique dealt with raw values as initial data, but it's also # possible to specify callable data. class UserRegistration(Form): username = CharField(max_length=10) password = CharField(widget=PasswordInput) options = MultipleChoiceField(choices=[('f', 'foo'), ('b', 'bar'), ('w', 'whiz')]) # We need to define functions that get called later.) def initial_django(): return 'django' def initial_stephane(): return 'stephane' def initial_options(): return ['f', 'b'] def initial_other_options(): return ['b', 'w'] # Here, we're not submitting any data, so the initial value will be displayed.) p = UserRegistration(initial={'username': initial_django, 'options': initial_options}, auto_id=False) self.assertHTMLEqual( p.as_ul(), """<li>Username: <input type="text" name="username" value="django" maxlength="10" required></li> <li>Password: <input type="password" name="password" required></li> <li>Options: <select multiple name="options" required> <option value="f" selected>foo</option> <option value="b" selected>bar</option> <option value="w">whiz</option> </select></li>""" ) # The 'initial' parameter is meaningless if you pass data. 
p = UserRegistration({}, initial={'username': initial_django, 'options': initial_options}, auto_id=False) self.assertHTMLEqual( p.as_ul(), """<li><ul class="errorlist"><li>This field is required.</li></ul> Username: <input type="text" name="username" maxlength="10" required></li> <li><ul class="errorlist"><li>This field is required.</li></ul> Password: <input type="password" name="password" required></li> <li><ul class="errorlist"><li>This field is required.</li></ul> Options: <select multiple name="options" required> <option value="f">foo</option> <option value="b">bar</option> <option value="w">whiz</option> </select></li>""" ) p = UserRegistration({'username': ''}, initial={'username': initial_django}, auto_id=False) self.assertHTMLEqual( p.as_ul(), """<li><ul class="errorlist"><li>This field is required.</li></ul> Username: <input type="text" name="username" maxlength="10" required></li> <li><ul class="errorlist"><li>This field is required.</li></ul> Password: <input type="password" name="password" required></li> <li><ul class="errorlist"><li>This field is required.</li></ul> Options: <select multiple name="options" required> <option value="f">foo</option> <option value="b">bar</option> <option value="w">whiz</option> </select></li>""" ) p = UserRegistration( {'username': 'foo', 'options': ['f', 'b']}, initial={'username': initial_django}, auto_id=False ) self.assertHTMLEqual( p.as_ul(), """<li>Username: <input type="text" name="username" value="foo" maxlength="10" required></li> <li><ul class="errorlist"><li>This field is required.</li></ul> Password: <input type="password" name="password" required></li> <li>Options: <select multiple name="options" required> <option value="f" selected>foo</option> <option value="b" selected>bar</option> <option value="w">whiz</option> </select></li>""" ) # A callable 'initial' value is *not* used as a fallback if data is not provided. # In this example, we don't provide a value for 'username', and the form raises a # validation error rather than using the initial value for 'username'. p = UserRegistration({'password': 'secret'}, initial={'username': initial_django, 'options': initial_options}) self.assertEqual(p.errors['username'], ['This field is required.']) self.assertFalse(p.is_valid()) # If a Form defines 'initial' *and* 'initial' is passed as a parameter to Form(), # then the latter will get precedence. 
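        # Callable initial values are evaluated lazily, when the value is
        # rendered rather than when the form is constructed.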
class UserRegistration(Form): username = CharField(max_length=10, initial=initial_django) password = CharField(widget=PasswordInput) options = MultipleChoiceField( choices=[('f', 'foo'), ('b', 'bar'), ('w', 'whiz')], initial=initial_other_options, ) p = UserRegistration(auto_id=False) self.assertHTMLEqual( p.as_ul(), """<li>Username: <input type="text" name="username" value="django" maxlength="10" required></li> <li>Password: <input type="password" name="password" required></li> <li>Options: <select multiple name="options" required> <option value="f">foo</option> <option value="b" selected>bar</option> <option value="w" selected>whiz</option> </select></li>""" ) p = UserRegistration(initial={'username': initial_stephane, 'options': initial_options}, auto_id=False) self.assertHTMLEqual( p.as_ul(), """<li>Username: <input type="text" name="username" value="stephane" maxlength="10" required></li> <li>Password: <input type="password" name="password" required></li> <li>Options: <select multiple name="options" required> <option value="f" selected>foo</option> <option value="b" selected>bar</option> <option value="w">whiz</option> </select></li>""" ) def test_get_initial_for_field(self): class PersonForm(Form): first_name = CharField(initial='John') last_name = CharField(initial='Doe') age = IntegerField() occupation = CharField(initial=lambda: 'Unknown') form = PersonForm(initial={'first_name': 'Jane'}) self.assertIsNone(form.get_initial_for_field(form.fields['age'], 'age')) self.assertEqual(form.get_initial_for_field(form.fields['last_name'], 'last_name'), 'Doe') # Form.initial overrides Field.initial. self.assertEqual(form.get_initial_for_field(form.fields['first_name'], 'first_name'), 'Jane') # Callables are evaluated. self.assertEqual(form.get_initial_for_field(form.fields['occupation'], 'occupation'), 'Unknown') def test_changed_data(self): class Person(Form): first_name = CharField(initial='Hans') last_name = CharField(initial='Greatel') birthday = DateField(initial=datetime.date(1974, 8, 16)) p = Person(data={'first_name': 'Hans', 'last_name': 'Scrmbl', 'birthday': '1974-08-16'}) self.assertTrue(p.is_valid()) self.assertNotIn('first_name', p.changed_data) self.assertIn('last_name', p.changed_data) self.assertNotIn('birthday', p.changed_data) # A field raising ValidationError is always in changed_data class PedanticField(forms.Field): def to_python(self, value): raise ValidationError('Whatever') class Person2(Person): pedantic = PedanticField(initial='whatever', show_hidden_initial=True) p = Person2(data={ 'first_name': 'Hans', 'last_name': 'Scrmbl', 'birthday': '1974-08-16', 'initial-pedantic': 'whatever', }) self.assertFalse(p.is_valid()) self.assertIn('pedantic', p.changed_data) def test_boundfield_values(self): # It's possible to get to the value which would be used for rendering # the widget for a field by using the BoundField's value method. class UserRegistration(Form): username = CharField(max_length=10, initial='djangonaut') password = CharField(widget=PasswordInput) unbound = UserRegistration() bound = UserRegistration({'password': 'foo'}) self.assertIsNone(bound['username'].value()) self.assertEqual(unbound['username'].value(), 'djangonaut') self.assertEqual(bound['password'].value(), 'foo') self.assertIsNone(unbound['password'].value()) def test_boundfield_initial_called_once(self): """ Multiple calls to BoundField().value() in an unbound form should return the same result each time (#24391). 
""" class MyForm(Form): name = CharField(max_length=10, initial=uuid.uuid4) form = MyForm() name = form['name'] self.assertEqual(name.value(), name.value()) # BoundField is also cached self.assertIs(form['name'], name) def test_boundfield_value_disabled_callable_initial(self): class PersonForm(Form): name = CharField(initial=lambda: 'John Doe', disabled=True) # Without form data. form = PersonForm() self.assertEqual(form['name'].value(), 'John Doe') # With form data. As the field is disabled, the value should not be # affected by the form data. form = PersonForm({}) self.assertEqual(form['name'].value(), 'John Doe') def test_custom_boundfield(self): class CustomField(CharField): def get_bound_field(self, form, name): return (form, name) class SampleForm(Form): name = CustomField() f = SampleForm() self.assertEqual(f['name'], (f, 'name')) def test_initial_datetime_values(self): now = datetime.datetime.now() # Nix microseconds (since they should be ignored). #22502 now_no_ms = now.replace(microsecond=0) if now == now_no_ms: now = now.replace(microsecond=1) def delayed_now(): return now def delayed_now_time(): return now.time() class HiddenInputWithoutMicrosec(HiddenInput): supports_microseconds = False class TextInputWithoutMicrosec(TextInput): supports_microseconds = False class DateTimeForm(Form): auto_timestamp = DateTimeField(initial=delayed_now) auto_time_only = TimeField(initial=delayed_now_time) supports_microseconds = DateTimeField(initial=delayed_now, widget=TextInput) hi_default_microsec = DateTimeField(initial=delayed_now, widget=HiddenInput) hi_without_microsec = DateTimeField(initial=delayed_now, widget=HiddenInputWithoutMicrosec) ti_without_microsec = DateTimeField(initial=delayed_now, widget=TextInputWithoutMicrosec) unbound = DateTimeForm() self.assertEqual(unbound['auto_timestamp'].value(), now_no_ms) self.assertEqual(unbound['auto_time_only'].value(), now_no_ms.time()) self.assertEqual(unbound['supports_microseconds'].value(), now) self.assertEqual(unbound['hi_default_microsec'].value(), now) self.assertEqual(unbound['hi_without_microsec'].value(), now_no_ms) self.assertEqual(unbound['ti_without_microsec'].value(), now_no_ms) def test_datetime_clean_initial_callable_disabled(self): now = datetime.datetime(2006, 10, 25, 14, 30, 45, 123456) class DateTimeForm(forms.Form): dt = DateTimeField(initial=lambda: now, disabled=True) form = DateTimeForm({}) self.assertEqual(form.errors, {}) self.assertEqual(form.cleaned_data, {'dt': now}) def test_datetime_changed_data_callable_with_microseconds(self): class DateTimeForm(forms.Form): dt = DateTimeField(initial=lambda: datetime.datetime(2006, 10, 25, 14, 30, 45, 123456), disabled=True) form = DateTimeForm({'dt': '2006-10-25 14:30:45'}) self.assertEqual(form.changed_data, []) def test_help_text(self): # You can specify descriptive text for a field by using the 'help_text' argument) class UserRegistration(Form): username = CharField(max_length=10, help_text='e.g., [email protected]') password = CharField(widget=PasswordInput, help_text='Wählen Sie mit Bedacht.') p = UserRegistration(auto_id=False) self.assertHTMLEqual( p.as_ul(), """<li>Username: <input type="text" name="username" maxlength="10" required> <span class="helptext">e.g., [email protected]</span></li> <li>Password: <input type="password" name="password" required> <span class="helptext">Wählen Sie mit Bedacht.</span></li>""" ) self.assertHTMLEqual( p.as_p(), """<p>Username: <input type="text" name="username" maxlength="10" required> <span class="helptext">e.g., [email 
protected]</span></p> <p>Password: <input type="password" name="password" required> <span class="helptext">Wählen Sie mit Bedacht.</span></p>""" ) self.assertHTMLEqual( p.as_table(), """<tr><th>Username:</th><td><input type="text" name="username" maxlength="10" required><br> <span class="helptext">e.g., [email protected]</span></td></tr> <tr><th>Password:</th><td><input type="password" name="password" required><br> <span class="helptext">Wählen Sie mit Bedacht.</span></td></tr>""" ) # The help text is displayed whether or not data is provided for the form. p = UserRegistration({'username': 'foo'}, auto_id=False) self.assertHTMLEqual( p.as_ul(), """<li>Username: <input type="text" name="username" value="foo" maxlength="10" required> <span class="helptext">e.g., [email protected]</span></li> <li><ul class="errorlist"><li>This field is required.</li></ul> Password: <input type="password" name="password" required> <span class="helptext">Wählen Sie mit Bedacht.</span></li>""" ) # help_text is not displayed for hidden fields. It can be used for documentation # purposes, though. class UserRegistration(Form): username = CharField(max_length=10, help_text='e.g., [email protected]') password = CharField(widget=PasswordInput) next = CharField(widget=HiddenInput, initial='/', help_text='Redirect destination') p = UserRegistration(auto_id=False) self.assertHTMLEqual( p.as_ul(), """<li>Username: <input type="text" name="username" maxlength="10" required> <span class="helptext">e.g., [email protected]</span></li> <li>Password: <input type="password" name="password" required> <input type="hidden" name="next" value="/"></li>""" ) def test_subclassing_forms(self): # You can subclass a Form to add fields. The resulting form subclass will have # all of the fields of the parent Form, plus whichever fields you define in the # subclass. class Person(Form): first_name = CharField() last_name = CharField() birthday = DateField() class Musician(Person): instrument = CharField() p = Person(auto_id=False) self.assertHTMLEqual( p.as_ul(), """<li>First name: <input type="text" name="first_name" required></li> <li>Last name: <input type="text" name="last_name" required></li> <li>Birthday: <input type="text" name="birthday" required></li>""" ) m = Musician(auto_id=False) self.assertHTMLEqual( m.as_ul(), """<li>First name: <input type="text" name="first_name" required></li> <li>Last name: <input type="text" name="last_name" required></li> <li>Birthday: <input type="text" name="birthday" required></li> <li>Instrument: <input type="text" name="instrument" required></li>""" ) # Yes, you can subclass multiple forms. The fields are added in the order in # which the parent classes are listed. class Person(Form): first_name = CharField() last_name = CharField() birthday = DateField() class Instrument(Form): instrument = CharField() class Beatle(Person, Instrument): haircut_type = CharField() b = Beatle(auto_id=False) self.assertHTMLEqual(b.as_ul(), """<li>Instrument: <input type="text" name="instrument" required></li> <li>First name: <input type="text" name="first_name" required></li> <li>Last name: <input type="text" name="last_name" required></li> <li>Birthday: <input type="text" name="birthday" required></li> <li>Haircut type: <input type="text" name="haircut_type" required></li>""") def test_forms_with_prefixes(self): # Sometimes it's necessary to have multiple forms display on the same HTML page, # or multiple copies of the same form. We can accomplish this with form prefixes. 
# Pass the keyword argument 'prefix' to the Form constructor to use this feature. # This value will be prepended to each HTML form field name. One way to think # about this is "namespaces for HTML forms". Notice that in the data argument, # each field's key has the prefix, in this case 'person1', prepended to the # actual field name. class Person(Form): first_name = CharField() last_name = CharField() birthday = DateField() data = { 'person1-first_name': 'John', 'person1-last_name': 'Lennon', 'person1-birthday': '1940-10-9' } p = Person(data, prefix='person1') self.assertHTMLEqual( p.as_ul(), """<li><label for="id_person1-first_name">First name:</label> <input type="text" name="person1-first_name" value="John" id="id_person1-first_name" required></li> <li><label for="id_person1-last_name">Last name:</label> <input type="text" name="person1-last_name" value="Lennon" id="id_person1-last_name" required></li> <li><label for="id_person1-birthday">Birthday:</label> <input type="text" name="person1-birthday" value="1940-10-9" id="id_person1-birthday" required></li>""" ) self.assertHTMLEqual( str(p['first_name']), '<input type="text" name="person1-first_name" value="John" id="id_person1-first_name" required>' ) self.assertHTMLEqual( str(p['last_name']), '<input type="text" name="person1-last_name" value="Lennon" id="id_person1-last_name" required>' ) self.assertHTMLEqual( str(p['birthday']), '<input type="text" name="person1-birthday" value="1940-10-9" id="id_person1-birthday" required>' ) self.assertEqual(p.errors, {}) self.assertTrue(p.is_valid()) self.assertEqual(p.cleaned_data['first_name'], 'John') self.assertEqual(p.cleaned_data['last_name'], 'Lennon') self.assertEqual(p.cleaned_data['birthday'], datetime.date(1940, 10, 9)) # Let's try submitting some bad data to make sure form.errors and field.errors # work as expected. data = { 'person1-first_name': '', 'person1-last_name': '', 'person1-birthday': '' } p = Person(data, prefix='person1') self.assertEqual(p.errors['first_name'], ['This field is required.']) self.assertEqual(p.errors['last_name'], ['This field is required.']) self.assertEqual(p.errors['birthday'], ['This field is required.']) self.assertEqual(p['first_name'].errors, ['This field is required.']) # Accessing a nonexistent field. with self.assertRaises(KeyError): p['person1-first_name'].errors # In this example, the data doesn't have a prefix, but the form requires it, so # the form doesn't "see" the fields. data = { 'first_name': 'John', 'last_name': 'Lennon', 'birthday': '1940-10-9' } p = Person(data, prefix='person1') self.assertEqual(p.errors['first_name'], ['This field is required.']) self.assertEqual(p.errors['last_name'], ['This field is required.']) self.assertEqual(p.errors['birthday'], ['This field is required.']) # With prefixes, a single data dictionary can hold data for multiple instances # of the same form. 
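        # A rough sketch (not executed by this test) of how two prefixed copies
        # of the same form are typically bound to one POST payload in a view;
        # the 'parent'/'child' prefixes here are hypothetical:
        #
        #     parent_form = Person(request.POST, prefix='parent')
        #     child_form = Person(request.POST, prefix='child')
        #     if parent_form.is_valid() and child_form.is_valid():
        #         ...  # each form only sees its own 'parent-*' / 'child-*' keys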
data = { 'person1-first_name': 'John', 'person1-last_name': 'Lennon', 'person1-birthday': '1940-10-9', 'person2-first_name': 'Jim', 'person2-last_name': 'Morrison', 'person2-birthday': '1943-12-8' } p1 = Person(data, prefix='person1') self.assertTrue(p1.is_valid()) self.assertEqual(p1.cleaned_data['first_name'], 'John') self.assertEqual(p1.cleaned_data['last_name'], 'Lennon') self.assertEqual(p1.cleaned_data['birthday'], datetime.date(1940, 10, 9)) p2 = Person(data, prefix='person2') self.assertTrue(p2.is_valid()) self.assertEqual(p2.cleaned_data['first_name'], 'Jim') self.assertEqual(p2.cleaned_data['last_name'], 'Morrison') self.assertEqual(p2.cleaned_data['birthday'], datetime.date(1943, 12, 8)) # By default, forms append a hyphen between the prefix and the field name, but a # form can alter that behavior by implementing the add_prefix() method. This # method takes a field name and returns the prefixed field name, according to # self.prefix. class Person(Form): first_name = CharField() last_name = CharField() birthday = DateField() def add_prefix(self, field_name): return '%s-prefix-%s' % (self.prefix, field_name) if self.prefix else field_name p = Person(prefix='foo') self.assertHTMLEqual( p.as_ul(), """<li><label for="id_foo-prefix-first_name">First name:</label> <input type="text" name="foo-prefix-first_name" id="id_foo-prefix-first_name" required></li> <li><label for="id_foo-prefix-last_name">Last name:</label> <input type="text" name="foo-prefix-last_name" id="id_foo-prefix-last_name" required></li> <li><label for="id_foo-prefix-birthday">Birthday:</label> <input type="text" name="foo-prefix-birthday" id="id_foo-prefix-birthday" required></li>""" ) data = { 'foo-prefix-first_name': 'John', 'foo-prefix-last_name': 'Lennon', 'foo-prefix-birthday': '1940-10-9' } p = Person(data, prefix='foo') self.assertTrue(p.is_valid()) self.assertEqual(p.cleaned_data['first_name'], 'John') self.assertEqual(p.cleaned_data['last_name'], 'Lennon') self.assertEqual(p.cleaned_data['birthday'], datetime.date(1940, 10, 9)) def test_class_prefix(self): # Prefix can also be specified at the class level. class Person(Form): first_name = CharField() prefix = 'foo' p = Person() self.assertEqual(p.prefix, 'foo') p = Person(prefix='bar') self.assertEqual(p.prefix, 'bar') def test_forms_with_null_boolean(self): # NullBooleanField is a bit of a special case because its presentation (widget) # is different from its data. This is handled transparently, though. 
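        # A condensed sketch of the value mapping the default NullBooleanSelect
        # widget applies (it mirrors the assertions below); anything not listed
        # falls back to None, which renders as "Unknown":
        #
        #     '2', 'true', True    -> True
        #     '3', 'false', False  -> False
        #     everything else      -> None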
class Person(Form): name = CharField() is_cool = NullBooleanField() p = Person({'name': 'Joe'}, auto_id=False) self.assertHTMLEqual(str(p['is_cool']), """<select name="is_cool"> <option value="unknown" selected>Unknown</option> <option value="true">Yes</option> <option value="false">No</option> </select>""") p = Person({'name': 'Joe', 'is_cool': '1'}, auto_id=False) self.assertHTMLEqual(str(p['is_cool']), """<select name="is_cool"> <option value="unknown" selected>Unknown</option> <option value="true">Yes</option> <option value="false">No</option> </select>""") p = Person({'name': 'Joe', 'is_cool': '2'}, auto_id=False) self.assertHTMLEqual(str(p['is_cool']), """<select name="is_cool"> <option value="unknown">Unknown</option> <option value="true" selected>Yes</option> <option value="false">No</option> </select>""") p = Person({'name': 'Joe', 'is_cool': '3'}, auto_id=False) self.assertHTMLEqual(str(p['is_cool']), """<select name="is_cool"> <option value="unknown">Unknown</option> <option value="true">Yes</option> <option value="false" selected>No</option> </select>""") p = Person({'name': 'Joe', 'is_cool': True}, auto_id=False) self.assertHTMLEqual(str(p['is_cool']), """<select name="is_cool"> <option value="unknown">Unknown</option> <option value="true" selected>Yes</option> <option value="false">No</option> </select>""") p = Person({'name': 'Joe', 'is_cool': False}, auto_id=False) self.assertHTMLEqual(str(p['is_cool']), """<select name="is_cool"> <option value="unknown">Unknown</option> <option value="true">Yes</option> <option value="false" selected>No</option> </select>""") p = Person({'name': 'Joe', 'is_cool': 'unknown'}, auto_id=False) self.assertHTMLEqual(str(p['is_cool']), """<select name="is_cool"> <option value="unknown" selected>Unknown</option> <option value="true">Yes</option> <option value="false">No</option> </select>""") p = Person({'name': 'Joe', 'is_cool': 'true'}, auto_id=False) self.assertHTMLEqual(str(p['is_cool']), """<select name="is_cool"> <option value="unknown">Unknown</option> <option value="true" selected>Yes</option> <option value="false">No</option> </select>""") p = Person({'name': 'Joe', 'is_cool': 'false'}, auto_id=False) self.assertHTMLEqual(str(p['is_cool']), """<select name="is_cool"> <option value="unknown">Unknown</option> <option value="true">Yes</option> <option value="false" selected>No</option> </select>""") def test_forms_with_file_fields(self): # FileFields are a special case because they take their data from the request.FILES, # not request.POST. class FileForm(Form): file1 = FileField() f = FileForm(auto_id=False) self.assertHTMLEqual( f.as_table(), '<tr><th>File1:</th><td><input type="file" name="file1" required></td></tr>', ) f = FileForm(data={}, files={}, auto_id=False) self.assertHTMLEqual( f.as_table(), '<tr><th>File1:</th><td>' '<ul class="errorlist"><li>This field is required.</li></ul>' '<input type="file" name="file1" required></td></tr>' ) f = FileForm(data={}, files={'file1': SimpleUploadedFile('name', b'')}, auto_id=False) self.assertHTMLEqual( f.as_table(), '<tr><th>File1:</th><td>' '<ul class="errorlist"><li>The submitted file is empty.</li></ul>' '<input type="file" name="file1" required></td></tr>' ) f = FileForm(data={}, files={'file1': 'something that is not a file'}, auto_id=False) self.assertHTMLEqual( f.as_table(), '<tr><th>File1:</th><td>' '<ul class="errorlist"><li>No file was submitted. 
Check the ' 'encoding type on the form.</li></ul>' '<input type="file" name="file1" required></td></tr>' ) f = FileForm(data={}, files={'file1': SimpleUploadedFile('name', b'some content')}, auto_id=False) self.assertHTMLEqual( f.as_table(), '<tr><th>File1:</th><td><input type="file" name="file1" required></td></tr>', ) self.assertTrue(f.is_valid()) file1 = SimpleUploadedFile('我隻氣墊船裝滿晒鱔.txt', 'मेरी मँडराने वाली नाव सर्पमीनों से भरी ह'.encode()) f = FileForm(data={}, files={'file1': file1}, auto_id=False) self.assertHTMLEqual( f.as_table(), '<tr><th>File1:</th><td><input type="file" name="file1" required></td></tr>', ) # A required file field with initial data should not contain the # required HTML attribute. The file input is left blank by the user to # keep the existing, initial value. f = FileForm(initial={'file1': 'resume.txt'}, auto_id=False) self.assertHTMLEqual( f.as_table(), '<tr><th>File1:</th><td><input type="file" name="file1"></td></tr>', ) def test_filefield_initial_callable(self): class FileForm(forms.Form): file1 = forms.FileField(initial=lambda: 'resume.txt') f = FileForm({}) self.assertEqual(f.errors, {}) self.assertEqual(f.cleaned_data['file1'], 'resume.txt') def test_filefield_with_fileinput_required(self): class FileForm(Form): file1 = forms.FileField(widget=FileInput) f = FileForm(auto_id=False) self.assertHTMLEqual( f.as_table(), '<tr><th>File1:</th><td>' '<input type="file" name="file1" required></td></tr>', ) # A required file field with initial data doesn't contain the required # HTML attribute. The file input is left blank by the user to keep the # existing, initial value. f = FileForm(initial={'file1': 'resume.txt'}, auto_id=False) self.assertHTMLEqual( f.as_table(), '<tr><th>File1:</th><td><input type="file" name="file1"></td></tr>', ) def test_basic_processing_in_view(self): class UserRegistration(Form): username = CharField(max_length=10) password1 = CharField(widget=PasswordInput) password2 = CharField(widget=PasswordInput) def clean(self): if (self.cleaned_data.get('password1') and self.cleaned_data.get('password2') and self.cleaned_data['password1'] != self.cleaned_data['password2']): raise ValidationError('Please make sure your passwords match.') return self.cleaned_data def my_function(method, post_data): if method == 'POST': form = UserRegistration(post_data, auto_id=False) else: form = UserRegistration(auto_id=False) if form.is_valid(): return 'VALID: %r' % sorted(form.cleaned_data.items()) t = Template( '<form method="post">\n' '<table>\n{{ form }}\n</table>\n<input type="submit" required>\n</form>' ) return t.render(Context({'form': form})) # Case 1: GET (an empty form, with no errors). self.assertHTMLEqual(my_function('GET', {}), """<form method="post"> <table> <tr><th>Username:</th><td><input type="text" name="username" maxlength="10" required></td></tr> <tr><th>Password1:</th><td><input type="password" name="password1" required></td></tr> <tr><th>Password2:</th><td><input type="password" name="password2" required></td></tr> </table> <input type="submit" required> </form>""") # Case 2: POST with erroneous data (a redisplayed form, with errors). 
self.assertHTMLEqual( my_function('POST', {'username': 'this-is-a-long-username', 'password1': 'foo', 'password2': 'bar'}), """<form method="post"> <table> <tr><td colspan="2"><ul class="errorlist nonfield"><li>Please make sure your passwords match.</li></ul></td></tr> <tr><th>Username:</th><td><ul class="errorlist"> <li>Ensure this value has at most 10 characters (it has 23).</li></ul> <input type="text" name="username" value="this-is-a-long-username" maxlength="10" required></td></tr> <tr><th>Password1:</th><td><input type="password" name="password1" required></td></tr> <tr><th>Password2:</th><td><input type="password" name="password2" required></td></tr> </table> <input type="submit" required> </form>""" ) # Case 3: POST with valid data (the success message). self.assertEqual( my_function('POST', {'username': 'adrian', 'password1': 'secret', 'password2': 'secret'}), "VALID: [('password1', 'secret'), ('password2', 'secret'), ('username', 'adrian')]" ) def test_templates_with_forms(self): class UserRegistration(Form): username = CharField(max_length=10, help_text="Good luck picking a username that doesn't already exist.") password1 = CharField(widget=PasswordInput) password2 = CharField(widget=PasswordInput) def clean(self): if (self.cleaned_data.get('password1') and self.cleaned_data.get('password2') and self.cleaned_data['password1'] != self.cleaned_data['password2']): raise ValidationError('Please make sure your passwords match.') return self.cleaned_data # You have full flexibility in displaying form fields in a template. Just pass a # Form instance to the template, and use "dot" access to refer to individual # fields. Note, however, that this flexibility comes with the responsibility of # displaying all the errors, including any that might not be associated with a # particular field. t = Template('''<form> {{ form.username.errors.as_ul }}<p><label>Your username: {{ form.username }}</label></p> {{ form.password1.errors.as_ul }}<p><label>Password: {{ form.password1 }}</label></p> {{ form.password2.errors.as_ul }}<p><label>Password (again): {{ form.password2 }}</label></p> <input type="submit" required> </form>''') self.assertHTMLEqual(t.render(Context({'form': UserRegistration(auto_id=False)})), """<form> <p><label>Your username: <input type="text" name="username" maxlength="10" required></label></p> <p><label>Password: <input type="password" name="password1" required></label></p> <p><label>Password (again): <input type="password" name="password2" required></label></p> <input type="submit" required> </form>""") self.assertHTMLEqual( t.render(Context({'form': UserRegistration({'username': 'django'}, auto_id=False)})), """<form> <p><label>Your username: <input type="text" name="username" value="django" maxlength="10" required></label></p> <ul class="errorlist"><li>This field is required.</li></ul><p> <label>Password: <input type="password" name="password1" required></label></p> <ul class="errorlist"><li>This field is required.</li></ul> <p><label>Password (again): <input type="password" name="password2" required></label></p> <input type="submit" required> </form>""" ) # Use form.[field].label to output a field's label. You can specify the label for # a field by using the 'label' argument to a Field class. If you don't specify # 'label', Django will use the field name with underscores converted to spaces, # and the initial letter capitalized. 
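        # For instance (a sketch with a hypothetical field name, not used in the
        # assertions below): a field declared as
        #
        #     email_address = CharField()
        #
        # gets the auto-generated label 'Email address' unless an explicit
        # label='...' argument is passed.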
t = Template('''<form> <p><label>{{ form.username.label }}: {{ form.username }}</label></p> <p><label>{{ form.password1.label }}: {{ form.password1 }}</label></p> <p><label>{{ form.password2.label }}: {{ form.password2 }}</label></p> <input type="submit" required> </form>''') self.assertHTMLEqual(t.render(Context({'form': UserRegistration(auto_id=False)})), """<form> <p><label>Username: <input type="text" name="username" maxlength="10" required></label></p> <p><label>Password1: <input type="password" name="password1" required></label></p> <p><label>Password2: <input type="password" name="password2" required></label></p> <input type="submit" required> </form>""") # Use form.[field].label_tag to output a field's label with a <label> tag # wrapped around it, but *only* if the given field has an "id" attribute. # Recall from above that passing the "auto_id" argument to a Form gives each # field an "id" attribute. t = Template('''<form> <p>{{ form.username.label_tag }} {{ form.username }}</p> <p>{{ form.password1.label_tag }} {{ form.password1 }}</p> <p>{{ form.password2.label_tag }} {{ form.password2 }}</p> <input type="submit" required> </form>''') self.assertHTMLEqual(t.render(Context({'form': UserRegistration(auto_id=False)})), """<form> <p>Username: <input type="text" name="username" maxlength="10" required></p> <p>Password1: <input type="password" name="password1" required></p> <p>Password2: <input type="password" name="password2" required></p> <input type="submit" required> </form>""") self.assertHTMLEqual(t.render(Context({'form': UserRegistration(auto_id='id_%s')})), """<form> <p><label for="id_username">Username:</label> <input id="id_username" type="text" name="username" maxlength="10" required></p> <p><label for="id_password1">Password1:</label> <input type="password" name="password1" id="id_password1" required></p> <p><label for="id_password2">Password2:</label> <input type="password" name="password2" id="id_password2" required></p> <input type="submit" required> </form>""") # Use form.[field].help_text to output a field's help text. If the given field # does not have help text, nothing will be output. t = Template('''<form> <p>{{ form.username.label_tag }} {{ form.username }}<br>{{ form.username.help_text }}</p> <p>{{ form.password1.label_tag }} {{ form.password1 }}</p> <p>{{ form.password2.label_tag }} {{ form.password2 }}</p> <input type="submit" required> </form>''') self.assertHTMLEqual( t.render(Context({'form': UserRegistration(auto_id=False)})), """<form> <p>Username: <input type="text" name="username" maxlength="10" required><br> Good luck picking a username that doesn&#x27;t already exist.</p> <p>Password1: <input type="password" name="password1" required></p> <p>Password2: <input type="password" name="password2" required></p> <input type="submit" required> </form>""" ) self.assertEqual( Template('{{ form.password1.help_text }}').render(Context({'form': UserRegistration(auto_id=False)})), '' ) # To display the errors that aren't associated with a particular field -- e.g., # the errors caused by Form.clean() -- use {{ form.non_field_errors }} in the # template. If used on its own, it is displayed as a <ul> (or an empty string, if # the list of errors is empty). You can also use it in {% if %} statements. 
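        # A sketch of the {% if %} usage mentioned above (template source shown
        # here as a comment only):
        #
        #     {% if form.non_field_errors %}
        #         <div class="form-errors">{{ form.non_field_errors }}</div>
        #     {% endif %}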
t = Template('''<form> {{ form.username.errors.as_ul }}<p><label>Your username: {{ form.username }}</label></p> {{ form.password1.errors.as_ul }}<p><label>Password: {{ form.password1 }}</label></p> {{ form.password2.errors.as_ul }}<p><label>Password (again): {{ form.password2 }}</label></p> <input type="submit" required> </form>''') self.assertHTMLEqual( t.render(Context({ 'form': UserRegistration({'username': 'django', 'password1': 'foo', 'password2': 'bar'}, auto_id=False) })), """<form> <p><label>Your username: <input type="text" name="username" value="django" maxlength="10" required></label></p> <p><label>Password: <input type="password" name="password1" required></label></p> <p><label>Password (again): <input type="password" name="password2" required></label></p> <input type="submit" required> </form>""" ) t = Template('''<form> {{ form.non_field_errors }} {{ form.username.errors.as_ul }}<p><label>Your username: {{ form.username }}</label></p> {{ form.password1.errors.as_ul }}<p><label>Password: {{ form.password1 }}</label></p> {{ form.password2.errors.as_ul }}<p><label>Password (again): {{ form.password2 }}</label></p> <input type="submit" required> </form>''') self.assertHTMLEqual( t.render(Context({ 'form': UserRegistration({'username': 'django', 'password1': 'foo', 'password2': 'bar'}, auto_id=False) })), """<form> <ul class="errorlist nonfield"><li>Please make sure your passwords match.</li></ul> <p><label>Your username: <input type="text" name="username" value="django" maxlength="10" required></label></p> <p><label>Password: <input type="password" name="password1" required></label></p> <p><label>Password (again): <input type="password" name="password2" required></label></p> <input type="submit" required> </form>""" ) def test_empty_permitted(self): # Sometimes (pretty much in formsets) we want to allow a form to pass validation # if it is completely empty. We can accomplish this by using the empty_permitted # argument to a form constructor. class SongForm(Form): artist = CharField() name = CharField() # First let's show what happens if empty_permitted=False (the default): data = {'artist': '', 'song': ''} form = SongForm(data, empty_permitted=False) self.assertFalse(form.is_valid()) self.assertEqual(form.errors, {'name': ['This field is required.'], 'artist': ['This field is required.']}) self.assertEqual(form.cleaned_data, {}) # Now let's show what happens when empty_permitted=True and the form is empty. form = SongForm(data, empty_permitted=True, use_required_attribute=False) self.assertTrue(form.is_valid()) self.assertEqual(form.errors, {}) self.assertEqual(form.cleaned_data, {}) # But if we fill in data for one of the fields, the form is no longer empty and # the whole thing must pass validation. data = {'artist': 'The Doors', 'song': ''} form = SongForm(data, empty_permitted=False) self.assertFalse(form.is_valid()) self.assertEqual(form.errors, {'name': ['This field is required.']}) self.assertEqual(form.cleaned_data, {'artist': 'The Doors'}) # If a field is not given in the data then None is returned for its data. Let's # make sure that when checking for empty_permitted that None is treated # accordingly. data = {'artist': None, 'song': ''} form = SongForm(data, empty_permitted=True, use_required_attribute=False) self.assertTrue(form.is_valid()) # However, we *really* need to be sure we are checking for None as any data in # initial that returns False on a boolean call needs to be treated literally. 
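        # (In the PriceForm case below, initial={'amount': 0.0} is falsy but not
        # missing, so the submitted '0.0' must still compare as "unchanged";
        # otherwise the form would no longer count as empty and 'qty' would be
        # required.)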
class PriceForm(Form): amount = FloatField() qty = IntegerField() data = {'amount': '0.0', 'qty': ''} form = PriceForm(data, initial={'amount': 0.0}, empty_permitted=True, use_required_attribute=False) self.assertTrue(form.is_valid()) def test_empty_permitted_and_use_required_attribute(self): msg = ( 'The empty_permitted and use_required_attribute arguments may not ' 'both be True.' ) with self.assertRaisesMessage(ValueError, msg): Person(empty_permitted=True, use_required_attribute=True) def test_extracting_hidden_and_visible(self): class SongForm(Form): token = CharField(widget=HiddenInput) artist = CharField() name = CharField() form = SongForm() self.assertEqual([f.name for f in form.hidden_fields()], ['token']) self.assertEqual([f.name for f in form.visible_fields()], ['artist', 'name']) def test_hidden_initial_gets_id(self): class MyForm(Form): field1 = CharField(max_length=50, show_hidden_initial=True) self.assertHTMLEqual( MyForm().as_table(), '<tr><th><label for="id_field1">Field1:</label></th>' '<td><input id="id_field1" type="text" name="field1" maxlength="50" required>' '<input type="hidden" name="initial-field1" id="initial-id_field1"></td></tr>' ) def test_error_html_required_html_classes(self): class Person(Form): name = CharField() is_cool = NullBooleanField() email = EmailField(required=False) age = IntegerField() p = Person({}) p.error_css_class = 'error' p.required_css_class = 'required' self.assertHTMLEqual( p.as_ul(), """<li class="required error"><ul class="errorlist"><li>This field is required.</li></ul> <label class="required" for="id_name">Name:</label> <input type="text" name="name" id="id_name" required></li> <li class="required"><label class="required" for="id_is_cool">Is cool:</label> <select name="is_cool" id="id_is_cool"> <option value="unknown" selected>Unknown</option> <option value="true">Yes</option> <option value="false">No</option> </select></li> <li><label for="id_email">Email:</label> <input type="email" name="email" id="id_email"></li> <li class="required error"><ul class="errorlist"><li>This field is required.</li></ul> <label class="required" for="id_age">Age:</label> <input type="number" name="age" id="id_age" required></li>""" ) self.assertHTMLEqual( p.as_p(), """<ul class="errorlist"><li>This field is required.</li></ul> <p class="required error"><label class="required" for="id_name">Name:</label> <input type="text" name="name" id="id_name" required></p> <p class="required"><label class="required" for="id_is_cool">Is cool:</label> <select name="is_cool" id="id_is_cool"> <option value="unknown" selected>Unknown</option> <option value="true">Yes</option> <option value="false">No</option> </select></p> <p><label for="id_email">Email:</label> <input type="email" name="email" id="id_email"></p> <ul class="errorlist"><li>This field is required.</li></ul> <p class="required error"><label class="required" for="id_age">Age:</label> <input type="number" name="age" id="id_age" required></p>""" ) self.assertHTMLEqual( p.as_table(), """<tr class="required error"> <th><label class="required" for="id_name">Name:</label></th> <td><ul class="errorlist"><li>This field is required.</li></ul> <input type="text" name="name" id="id_name" required></td></tr> <tr class="required"><th><label class="required" for="id_is_cool">Is cool:</label></th> <td><select name="is_cool" id="id_is_cool"> <option value="unknown" selected>Unknown</option> <option value="true">Yes</option> <option value="false">No</option> </select></td></tr> <tr><th><label 
for="id_email">Email:</label></th><td> <input type="email" name="email" id="id_email"></td></tr> <tr class="required error"><th><label class="required" for="id_age">Age:</label></th> <td><ul class="errorlist"><li>This field is required.</li></ul> <input type="number" name="age" id="id_age" required></td></tr>""" ) def test_label_has_required_css_class(self): """ #17922 - required_css_class is added to the label_tag() of required fields. """ class SomeForm(Form): required_css_class = 'required' field = CharField(max_length=10) field2 = IntegerField(required=False) f = SomeForm({'field': 'test'}) self.assertHTMLEqual(f['field'].label_tag(), '<label for="id_field" class="required">Field:</label>') self.assertHTMLEqual( f['field'].label_tag(attrs={'class': 'foo'}), '<label for="id_field" class="foo required">Field:</label>' ) self.assertHTMLEqual(f['field2'].label_tag(), '<label for="id_field2">Field2:</label>') def test_label_split_datetime_not_displayed(self): class EventForm(Form): happened_at = SplitDateTimeField(widget=SplitHiddenDateTimeWidget) form = EventForm() self.assertHTMLEqual( form.as_ul(), '<input type="hidden" name="happened_at_0" id="id_happened_at_0">' '<input type="hidden" name="happened_at_1" id="id_happened_at_1">' ) def test_multivalue_field_validation(self): def bad_names(value): if value == 'bad value': raise ValidationError('bad value not allowed') class NameField(MultiValueField): def __init__(self, fields=(), *args, **kwargs): fields = (CharField(label='First name', max_length=10), CharField(label='Last name', max_length=10)) super().__init__(fields=fields, *args, **kwargs) def compress(self, data_list): return ' '.join(data_list) class NameForm(Form): name = NameField(validators=[bad_names]) form = NameForm(data={'name': ['bad', 'value']}) form.full_clean() self.assertFalse(form.is_valid()) self.assertEqual(form.errors, {'name': ['bad value not allowed']}) form = NameForm(data={'name': ['should be overly', 'long for the field names']}) self.assertFalse(form.is_valid()) self.assertEqual( form.errors, { 'name': [ 'Ensure this value has at most 10 characters (it has 16).', 'Ensure this value has at most 10 characters (it has 24).', ], } ) form = NameForm(data={'name': ['fname', 'lname']}) self.assertTrue(form.is_valid()) self.assertEqual(form.cleaned_data, {'name': 'fname lname'}) def test_multivalue_deep_copy(self): """ #19298 -- MultiValueField needs to override the default as it needs to deep-copy subfields: """ class ChoicesField(MultiValueField): def __init__(self, fields=(), *args, **kwargs): fields = ( ChoiceField(label='Rank', choices=((1, 1), (2, 2))), CharField(label='Name', max_length=10), ) super().__init__(fields=fields, *args, **kwargs) field = ChoicesField() field2 = copy.deepcopy(field) self.assertIsInstance(field2, ChoicesField) self.assertIsNot(field2.fields, field.fields) self.assertIsNot(field2.fields[0].choices, field.fields[0].choices) def test_multivalue_initial_data(self): """ #23674 -- invalid initial data should not break form.changed_data() """ class DateAgeField(MultiValueField): def __init__(self, fields=(), *args, **kwargs): fields = (DateField(label="Date"), IntegerField(label="Age")) super().__init__(fields=fields, *args, **kwargs) class DateAgeForm(Form): date_age = DateAgeField() data = {"date_age": ["1998-12-06", 16]} form = DateAgeForm(data, initial={"date_age": ["200-10-10", 14]}) self.assertTrue(form.has_changed()) def test_multivalue_optional_subfields(self): class PhoneField(MultiValueField): def __init__(self, *args, **kwargs): 
fields = ( CharField(label='Country Code', validators=[ RegexValidator(r'^\+[0-9]{1,2}$', message='Enter a valid country code.')]), CharField(label='Phone Number'), CharField(label='Extension', error_messages={'incomplete': 'Enter an extension.'}), CharField(label='Label', required=False, help_text='E.g. home, work.'), ) super().__init__(fields, *args, **kwargs) def compress(self, data_list): if data_list: return '%s.%s ext. %s (label: %s)' % tuple(data_list) return None # An empty value for any field will raise a `required` error on a # required `MultiValueField`. f = PhoneField() with self.assertRaisesMessage(ValidationError, "'This field is required.'"): f.clean('') with self.assertRaisesMessage(ValidationError, "'This field is required.'"): f.clean(None) with self.assertRaisesMessage(ValidationError, "'This field is required.'"): f.clean([]) with self.assertRaisesMessage(ValidationError, "'This field is required.'"): f.clean(['+61']) with self.assertRaisesMessage(ValidationError, "'This field is required.'"): f.clean(['+61', '287654321', '123']) self.assertEqual('+61.287654321 ext. 123 (label: Home)', f.clean(['+61', '287654321', '123', 'Home'])) with self.assertRaisesMessage(ValidationError, "'Enter a valid country code.'"): f.clean(['61', '287654321', '123', 'Home']) # Empty values for fields will NOT raise a `required` error on an # optional `MultiValueField` f = PhoneField(required=False) self.assertIsNone(f.clean('')) self.assertIsNone(f.clean(None)) self.assertIsNone(f.clean([])) self.assertEqual('+61. ext. (label: )', f.clean(['+61'])) self.assertEqual('+61.287654321 ext. 123 (label: )', f.clean(['+61', '287654321', '123'])) self.assertEqual('+61.287654321 ext. 123 (label: Home)', f.clean(['+61', '287654321', '123', 'Home'])) with self.assertRaisesMessage(ValidationError, "'Enter a valid country code.'"): f.clean(['61', '287654321', '123', 'Home']) # For a required `MultiValueField` with `require_all_fields=False`, a # `required` error will only be raised if all fields are empty. Fields # can individually be required or optional. An empty value for any # required field will raise an `incomplete` error. f = PhoneField(require_all_fields=False) with self.assertRaisesMessage(ValidationError, "'This field is required.'"): f.clean('') with self.assertRaisesMessage(ValidationError, "'This field is required.'"): f.clean(None) with self.assertRaisesMessage(ValidationError, "'This field is required.'"): f.clean([]) with self.assertRaisesMessage(ValidationError, "'Enter a complete value.'"): f.clean(['+61']) self.assertEqual('+61.287654321 ext. 123 (label: )', f.clean(['+61', '287654321', '123'])) with self.assertRaisesMessage(ValidationError, "'Enter a complete value.', 'Enter an extension.'"): f.clean(['', '', '', 'Home']) with self.assertRaisesMessage(ValidationError, "'Enter a valid country code.'"): f.clean(['61', '287654321', '123', 'Home']) # For an optional `MultiValueField` with `require_all_fields=False`, we # don't get any `required` error but we still get `incomplete` errors. f = PhoneField(required=False, require_all_fields=False) self.assertIsNone(f.clean('')) self.assertIsNone(f.clean(None)) self.assertIsNone(f.clean([])) with self.assertRaisesMessage(ValidationError, "'Enter a complete value.'"): f.clean(['+61']) self.assertEqual('+61.287654321 ext. 
123 (label: )', f.clean(['+61', '287654321', '123'])) with self.assertRaisesMessage(ValidationError, "'Enter a complete value.', 'Enter an extension.'"): f.clean(['', '', '', 'Home']) with self.assertRaisesMessage(ValidationError, "'Enter a valid country code.'"): f.clean(['61', '287654321', '123', 'Home']) def test_custom_empty_values(self): """ Form fields can customize what is considered as an empty value for themselves (#19997). """ class CustomJSONField(CharField): empty_values = [None, ''] def to_python(self, value): # Fake json.loads if value == '{}': return {} return super().to_python(value) class JSONForm(forms.Form): json = CustomJSONField() form = JSONForm(data={'json': '{}'}) form.full_clean() self.assertEqual(form.cleaned_data, {'json': {}}) def test_boundfield_label_tag(self): class SomeForm(Form): field = CharField() boundfield = SomeForm()['field'] testcases = [ # (args, kwargs, expected) # without anything: just print the <label> ((), {}, '<label for="id_field">Field:</label>'), # passing just one argument: overrides the field's label (('custom',), {}, '<label for="id_field">custom:</label>'), # the overridden label is escaped (('custom&',), {}, '<label for="id_field">custom&amp;:</label>'), ((mark_safe('custom&'),), {}, '<label for="id_field">custom&:</label>'), # Passing attrs to add extra attributes on the <label> ((), {'attrs': {'class': 'pretty'}}, '<label for="id_field" class="pretty">Field:</label>') ] for args, kwargs, expected in testcases: with self.subTest(args=args, kwargs=kwargs): self.assertHTMLEqual(boundfield.label_tag(*args, **kwargs), expected) def test_boundfield_label_tag_no_id(self): """ If a widget has no id, label_tag just returns the text with no surrounding <label>. """ class SomeForm(Form): field = CharField() boundfield = SomeForm(auto_id='')['field'] self.assertHTMLEqual(boundfield.label_tag(), 'Field:') self.assertHTMLEqual(boundfield.label_tag('Custom&'), 'Custom&amp;:') def test_boundfield_label_tag_custom_widget_id_for_label(self): class CustomIdForLabelTextInput(TextInput): def id_for_label(self, id): return 'custom_' + id class EmptyIdForLabelTextInput(TextInput): def id_for_label(self, id): return None class SomeForm(Form): custom = CharField(widget=CustomIdForLabelTextInput) empty = CharField(widget=EmptyIdForLabelTextInput) form = SomeForm() self.assertHTMLEqual(form['custom'].label_tag(), '<label for="custom_id_custom">Custom:</label>') self.assertHTMLEqual(form['empty'].label_tag(), '<label>Empty:</label>') def test_boundfield_empty_label(self): class SomeForm(Form): field = CharField(label='') boundfield = SomeForm()['field'] self.assertHTMLEqual(boundfield.label_tag(), '<label for="id_field"></label>') def test_boundfield_id_for_label(self): class SomeForm(Form): field = CharField(label='') self.assertEqual(SomeForm()['field'].id_for_label, 'id_field') def test_boundfield_id_for_label_override_by_attrs(self): """ If an id is provided in `Widget.attrs`, it overrides the generated ID, unless it is `None`. 
""" class SomeForm(Form): field = CharField(widget=TextInput(attrs={'id': 'myCustomID'})) field_none = CharField(widget=TextInput(attrs={'id': None})) form = SomeForm() self.assertEqual(form['field'].id_for_label, 'myCustomID') self.assertEqual(form['field_none'].id_for_label, 'id_field_none') def test_label_tag_override(self): """ BoundField label_suffix (if provided) overrides Form label_suffix """ class SomeForm(Form): field = CharField() boundfield = SomeForm(label_suffix='!')['field'] self.assertHTMLEqual(boundfield.label_tag(label_suffix='$'), '<label for="id_field">Field$</label>') def test_field_name(self): """#5749 - `field_name` may be used as a key in _html_output().""" class SomeForm(Form): some_field = CharField() def as_p(self): return self._html_output( normal_row='<p id="p_%(field_name)s"></p>', error_row='%s', row_ender='</p>', help_text_html=' %s', errors_on_separate_row=True, ) form = SomeForm() self.assertHTMLEqual(form.as_p(), '<p id="p_some_field"></p>') def test_field_without_css_classes(self): """ `css_classes` may be used as a key in _html_output() (empty classes). """ class SomeForm(Form): some_field = CharField() def as_p(self): return self._html_output( normal_row='<p class="%(css_classes)s"></p>', error_row='%s', row_ender='</p>', help_text_html=' %s', errors_on_separate_row=True, ) form = SomeForm() self.assertHTMLEqual(form.as_p(), '<p class=""></p>') def test_field_with_css_class(self): """ `css_classes` may be used as a key in _html_output() (class comes from required_css_class in this case). """ class SomeForm(Form): some_field = CharField() required_css_class = 'foo' def as_p(self): return self._html_output( normal_row='<p class="%(css_classes)s"></p>', error_row='%s', row_ender='</p>', help_text_html=' %s', errors_on_separate_row=True, ) form = SomeForm() self.assertHTMLEqual(form.as_p(), '<p class="foo"></p>') def test_field_name_with_hidden_input(self): """ BaseForm._html_output() should merge all the hidden input fields and put them in the last row. """ class SomeForm(Form): hidden1 = CharField(widget=HiddenInput) custom = CharField() hidden2 = CharField(widget=HiddenInput) def as_p(self): return self._html_output( normal_row='<p%(html_class_attr)s>%(field)s %(field_name)s</p>', error_row='%s', row_ender='</p>', help_text_html=' %s', errors_on_separate_row=True, ) form = SomeForm() self.assertHTMLEqual( form.as_p(), '<p><input id="id_custom" name="custom" type="text" required> custom' '<input id="id_hidden1" name="hidden1" type="hidden">' '<input id="id_hidden2" name="hidden2" type="hidden"></p>' ) def test_field_name_with_hidden_input_and_non_matching_row_ender(self): """ BaseForm._html_output() should merge all the hidden input fields and put them in the last row ended with the specific row ender. 
""" class SomeForm(Form): hidden1 = CharField(widget=HiddenInput) custom = CharField() hidden2 = CharField(widget=HiddenInput) def as_p(self): return self._html_output( normal_row='<p%(html_class_attr)s>%(field)s %(field_name)s</p>', error_row='%s', row_ender='<hr><hr>', help_text_html=' %s', errors_on_separate_row=True ) form = SomeForm() self.assertHTMLEqual( form.as_p(), '<p><input id="id_custom" name="custom" type="text" required> custom</p>\n' '<input id="id_hidden1" name="hidden1" type="hidden">' '<input id="id_hidden2" name="hidden2" type="hidden"><hr><hr>' ) def test_error_dict(self): class MyForm(Form): foo = CharField() bar = CharField() def clean(self): raise ValidationError('Non-field error.', code='secret', params={'a': 1, 'b': 2}) form = MyForm({}) self.assertIs(form.is_valid(), False) errors = form.errors.as_text() control = [ '* foo\n * This field is required.', '* bar\n * This field is required.', '* __all__\n * Non-field error.', ] for error in control: self.assertIn(error, errors) errors = form.errors.as_ul() control = [ '<li>foo<ul class="errorlist"><li>This field is required.</li></ul></li>', '<li>bar<ul class="errorlist"><li>This field is required.</li></ul></li>', '<li>__all__<ul class="errorlist nonfield"><li>Non-field error.</li></ul></li>', ] for error in control: self.assertInHTML(error, errors) errors = form.errors.get_json_data() control = { 'foo': [{'code': 'required', 'message': 'This field is required.'}], 'bar': [{'code': 'required', 'message': 'This field is required.'}], '__all__': [{'code': 'secret', 'message': 'Non-field error.'}] } self.assertEqual(errors, control) self.assertEqual(json.dumps(errors), form.errors.as_json()) def test_error_dict_as_json_escape_html(self): """#21962 - adding html escape flag to ErrorDict""" class MyForm(Form): foo = CharField() bar = CharField() def clean(self): raise ValidationError( '<p>Non-field error.</p>', code='secret', params={'a': 1, 'b': 2}, ) control = { 'foo': [{'code': 'required', 'message': 'This field is required.'}], 'bar': [{'code': 'required', 'message': 'This field is required.'}], '__all__': [{'code': 'secret', 'message': '<p>Non-field error.</p>'}] } form = MyForm({}) self.assertFalse(form.is_valid()) errors = json.loads(form.errors.as_json()) self.assertEqual(errors, control) escaped_error = '&lt;p&gt;Non-field error.&lt;/p&gt;' self.assertEqual( form.errors.get_json_data(escape_html=True)['__all__'][0]['message'], escaped_error ) errors = json.loads(form.errors.as_json(escape_html=True)) control['__all__'][0]['message'] = escaped_error self.assertEqual(errors, control) def test_error_list(self): e = ErrorList() e.append('Foo') e.append(ValidationError('Foo%(bar)s', code='foobar', params={'bar': 'bar'})) self.assertIsInstance(e, list) self.assertIn('Foo', e) self.assertIn('Foo', forms.ValidationError(e)) self.assertEqual( e.as_text(), '* Foo\n* Foobar' ) self.assertEqual( e.as_ul(), '<ul class="errorlist"><li>Foo</li><li>Foobar</li></ul>' ) errors = e.get_json_data() self.assertEqual( errors, [{"message": "Foo", "code": ""}, {"message": "Foobar", "code": "foobar"}] ) self.assertEqual(json.dumps(errors), e.as_json()) def test_error_list_class_not_specified(self): e = ErrorList() e.append('Foo') e.append(ValidationError('Foo%(bar)s', code='foobar', params={'bar': 'bar'})) self.assertEqual( e.as_ul(), '<ul class="errorlist"><li>Foo</li><li>Foobar</li></ul>' ) def test_error_list_class_has_one_class_specified(self): e = ErrorList(error_class='foobar-error-class') e.append('Foo') 
e.append(ValidationError('Foo%(bar)s', code='foobar', params={'bar': 'bar'})) self.assertEqual( e.as_ul(), '<ul class="errorlist foobar-error-class"><li>Foo</li><li>Foobar</li></ul>' ) def test_error_list_with_hidden_field_errors_has_correct_class(self): class Person(Form): first_name = CharField() last_name = CharField(widget=HiddenInput) p = Person({'first_name': 'John'}) self.assertHTMLEqual( p.as_ul(), """<li><ul class="errorlist nonfield"> <li>(Hidden field last_name) This field is required.</li></ul></li><li> <label for="id_first_name">First name:</label> <input id="id_first_name" name="first_name" type="text" value="John" required> <input id="id_last_name" name="last_name" type="hidden"></li>""" ) self.assertHTMLEqual( p.as_p(), """<ul class="errorlist nonfield"><li>(Hidden field last_name) This field is required.</li></ul> <p><label for="id_first_name">First name:</label> <input id="id_first_name" name="first_name" type="text" value="John" required> <input id="id_last_name" name="last_name" type="hidden"></p>""" ) self.assertHTMLEqual( p.as_table(), """<tr><td colspan="2"><ul class="errorlist nonfield"> <li>(Hidden field last_name) This field is required.</li></ul></td></tr> <tr><th><label for="id_first_name">First name:</label></th><td> <input id="id_first_name" name="first_name" type="text" value="John" required> <input id="id_last_name" name="last_name" type="hidden"></td></tr>""" ) def test_error_list_with_non_field_errors_has_correct_class(self): class Person(Form): first_name = CharField() last_name = CharField() def clean(self): raise ValidationError('Generic validation error') p = Person({'first_name': 'John', 'last_name': 'Lennon'}) self.assertHTMLEqual( str(p.non_field_errors()), '<ul class="errorlist nonfield"><li>Generic validation error</li></ul>' ) self.assertHTMLEqual( p.as_ul(), """<li> <ul class="errorlist nonfield"><li>Generic validation error</li></ul></li> <li><label for="id_first_name">First name:</label> <input id="id_first_name" name="first_name" type="text" value="John" required></li> <li><label for="id_last_name">Last name:</label> <input id="id_last_name" name="last_name" type="text" value="Lennon" required></li>""" ) self.assertHTMLEqual( p.non_field_errors().as_text(), '* Generic validation error' ) self.assertHTMLEqual( p.as_p(), """<ul class="errorlist nonfield"><li>Generic validation error</li></ul> <p><label for="id_first_name">First name:</label> <input id="id_first_name" name="first_name" type="text" value="John" required></p> <p><label for="id_last_name">Last name:</label> <input id="id_last_name" name="last_name" type="text" value="Lennon" required></p>""" ) self.assertHTMLEqual( p.as_table(), """<tr><td colspan="2"><ul class="errorlist nonfield"><li>Generic validation error</li></ul></td></tr> <tr><th><label for="id_first_name">First name:</label></th><td> <input id="id_first_name" name="first_name" type="text" value="John" required></td></tr> <tr><th><label for="id_last_name">Last name:</label></th><td> <input id="id_last_name" name="last_name" type="text" value="Lennon" required></td></tr>""" ) def test_errorlist_override(self): class DivErrorList(ErrorList): def __str__(self): return self.as_divs() def as_divs(self): if not self: return '' return '<div class="errorlist">%s</div>' % ''.join( '<div class="error">%s</div>' % e for e in self) class CommentForm(Form): name = CharField(max_length=50, required=False) email = EmailField() comment = CharField() data = {'email': 'invalid'} f = CommentForm(data, auto_id=False, error_class=DivErrorList) 
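        # With error_class=DivErrorList, every spot where the form would have
        # rendered the default <ul class="errorlist"> now renders through
        # DivErrorList (its __str__()/as_divs() output), as the assertion
        # below shows: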
self.assertHTMLEqual(f.as_p(), """<p>Name: <input type="text" name="name" maxlength="50"></p> <div class="errorlist"><div class="error">Enter a valid email address.</div></div> <p>Email: <input type="email" name="email" value="invalid" required></p> <div class="errorlist"><div class="error">This field is required.</div></div> <p>Comment: <input type="text" name="comment" required></p>""") def test_error_escaping(self): class TestForm(Form): hidden = CharField(widget=HiddenInput(), required=False) visible = CharField() def clean_hidden(self): raise ValidationError('Foo & "bar"!') clean_visible = clean_hidden form = TestForm({'hidden': 'a', 'visible': 'b'}) form.is_valid() self.assertHTMLEqual( form.as_ul(), '<li><ul class="errorlist nonfield"><li>(Hidden field hidden) Foo &amp; &quot;bar&quot;!</li></ul></li>' '<li><ul class="errorlist"><li>Foo &amp; &quot;bar&quot;!</li></ul>' '<label for="id_visible">Visible:</label> ' '<input type="text" name="visible" value="b" id="id_visible" required>' '<input type="hidden" name="hidden" value="a" id="id_hidden"></li>' ) def test_baseform_repr(self): """ BaseForm.__repr__() should contain some basic information about the form. """ p = Person() self.assertEqual(repr(p), "<Person bound=False, valid=Unknown, fields=(first_name;last_name;birthday)>") p = Person({'first_name': 'John', 'last_name': 'Lennon', 'birthday': '1940-10-9'}) self.assertEqual(repr(p), "<Person bound=True, valid=Unknown, fields=(first_name;last_name;birthday)>") p.is_valid() self.assertEqual(repr(p), "<Person bound=True, valid=True, fields=(first_name;last_name;birthday)>") p = Person({'first_name': 'John', 'last_name': 'Lennon', 'birthday': 'fakedate'}) p.is_valid() self.assertEqual(repr(p), "<Person bound=True, valid=False, fields=(first_name;last_name;birthday)>") def test_baseform_repr_dont_trigger_validation(self): """ BaseForm.__repr__() shouldn't trigger the form validation. """ p = Person({'first_name': 'John', 'last_name': 'Lennon', 'birthday': 'fakedate'}) repr(p) with self.assertRaises(AttributeError): p.cleaned_data self.assertFalse(p.is_valid()) self.assertEqual(p.cleaned_data, {'first_name': 'John', 'last_name': 'Lennon'}) def test_accessing_clean(self): class UserForm(Form): username = CharField(max_length=10) password = CharField(widget=PasswordInput) def clean(self): data = self.cleaned_data if not self.errors: data['username'] = data['username'].lower() return data f = UserForm({'username': 'SirRobin', 'password': 'blue'}) self.assertTrue(f.is_valid()) self.assertEqual(f.cleaned_data['username'], 'sirrobin') def test_changing_cleaned_data_nothing_returned(self): class UserForm(Form): username = CharField(max_length=10) password = CharField(widget=PasswordInput) def clean(self): self.cleaned_data['username'] = self.cleaned_data['username'].lower() # don't return anything f = UserForm({'username': 'SirRobin', 'password': 'blue'}) self.assertTrue(f.is_valid()) self.assertEqual(f.cleaned_data['username'], 'sirrobin') def test_changing_cleaned_data_in_clean(self): class UserForm(Form): username = CharField(max_length=10) password = CharField(widget=PasswordInput) def clean(self): data = self.cleaned_data # Return a different dict. We have not changed self.cleaned_data. 
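                # (BaseForm._clean_form() assigns a non-None return value of
                # clean() back to self.cleaned_data, which is why the
                # replacement dict below takes effect even though cleaned_data
                # was never mutated in place.)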
return { 'username': data['username'].lower(), 'password': 'this_is_not_a_secret', } f = UserForm({'username': 'SirRobin', 'password': 'blue'}) self.assertTrue(f.is_valid()) self.assertEqual(f.cleaned_data['username'], 'sirrobin') def test_multipart_encoded_form(self): class FormWithoutFile(Form): username = CharField() class FormWithFile(Form): username = CharField() file = FileField() class FormWithImage(Form): image = ImageField() self.assertFalse(FormWithoutFile().is_multipart()) self.assertTrue(FormWithFile().is_multipart()) self.assertTrue(FormWithImage().is_multipart()) def test_html_safe(self): class SimpleForm(Form): username = CharField() form = SimpleForm() self.assertTrue(hasattr(SimpleForm, '__html__')) self.assertEqual(str(form), form.__html__()) self.assertTrue(hasattr(form['username'], '__html__')) self.assertEqual(str(form['username']), form['username'].__html__()) def test_use_required_attribute_true(self): class MyForm(Form): use_required_attribute = True f1 = CharField(max_length=30) f2 = CharField(max_length=30, required=False) f3 = CharField(widget=Textarea) f4 = ChoiceField(choices=[('P', 'Python'), ('J', 'Java')]) form = MyForm() self.assertHTMLEqual( form.as_p(), '<p><label for="id_f1">F1:</label> <input id="id_f1" maxlength="30" name="f1" type="text" required></p>' '<p><label for="id_f2">F2:</label> <input id="id_f2" maxlength="30" name="f2" type="text"></p>' '<p><label for="id_f3">F3:</label> <textarea cols="40" id="id_f3" name="f3" rows="10" required>' '</textarea></p>' '<p><label for="id_f4">F4:</label> <select id="id_f4" name="f4">' '<option value="P">Python</option>' '<option value="J">Java</option>' '</select></p>', ) self.assertHTMLEqual( form.as_ul(), '<li><label for="id_f1">F1:</label> ' '<input id="id_f1" maxlength="30" name="f1" type="text" required></li>' '<li><label for="id_f2">F2:</label> <input id="id_f2" maxlength="30" name="f2" type="text"></li>' '<li><label for="id_f3">F3:</label> <textarea cols="40" id="id_f3" name="f3" rows="10" required>' '</textarea></li>' '<li><label for="id_f4">F4:</label> <select id="id_f4" name="f4">' '<option value="P">Python</option>' '<option value="J">Java</option>' '</select></li>', ) self.assertHTMLEqual( form.as_table(), '<tr><th><label for="id_f1">F1:</label></th>' '<td><input id="id_f1" maxlength="30" name="f1" type="text" required></td></tr>' '<tr><th><label for="id_f2">F2:</label></th>' '<td><input id="id_f2" maxlength="30" name="f2" type="text"></td></tr>' '<tr><th><label for="id_f3">F3:</label></th>' '<td><textarea cols="40" id="id_f3" name="f3" rows="10" required>' '</textarea></td></tr>' '<tr><th><label for="id_f4">F4:</label></th><td><select id="id_f4" name="f4">' '<option value="P">Python</option>' '<option value="J">Java</option>' '</select></td></tr>', ) def test_use_required_attribute_false(self): class MyForm(Form): use_required_attribute = False f1 = CharField(max_length=30) f2 = CharField(max_length=30, required=False) f3 = CharField(widget=Textarea) f4 = ChoiceField(choices=[('P', 'Python'), ('J', 'Java')]) form = MyForm() self.assertHTMLEqual( form.as_p(), '<p><label for="id_f1">F1:</label> <input id="id_f1" maxlength="30" name="f1" type="text"></p>' '<p><label for="id_f2">F2:</label> <input id="id_f2" maxlength="30" name="f2" type="text"></p>' '<p><label for="id_f3">F3:</label> <textarea cols="40" id="id_f3" name="f3" rows="10">' '</textarea></p>' '<p><label for="id_f4">F4:</label> <select id="id_f4" name="f4">' '<option value="P">Python</option>' '<option value="J">Java</option>' 
'</select></p>', ) self.assertHTMLEqual( form.as_ul(), '<li><label for="id_f1">F1:</label> <input id="id_f1" maxlength="30" name="f1" type="text"></li>' '<li><label for="id_f2">F2:</label> <input id="id_f2" maxlength="30" name="f2" type="text"></li>' '<li><label for="id_f3">F3:</label> <textarea cols="40" id="id_f3" name="f3" rows="10">' '</textarea></li>' '<li><label for="id_f4">F4:</label> <select id="id_f4" name="f4">' '<option value="P">Python</option>' '<option value="J">Java</option>' '</select></li>', ) self.assertHTMLEqual( form.as_table(), '<tr><th><label for="id_f1">F1:</label></th>' '<td><input id="id_f1" maxlength="30" name="f1" type="text"></td></tr>' '<tr><th><label for="id_f2">F2:</label></th>' '<td><input id="id_f2" maxlength="30" name="f2" type="text"></td></tr>' '<tr><th><label for="id_f3">F3:</label></th><td><textarea cols="40" id="id_f3" name="f3" rows="10">' '</textarea></td></tr>' '<tr><th><label for="id_f4">F4:</label></th><td><select id="id_f4" name="f4">' '<option value="P">Python</option>' '<option value="J">Java</option>' '</select></td></tr>', ) def test_only_hidden_fields(self): # A form with *only* hidden fields that has errors is going to be very unusual. class HiddenForm(Form): data = IntegerField(widget=HiddenInput) f = HiddenForm({}) self.assertHTMLEqual( f.as_p(), '<ul class="errorlist nonfield">' '<li>(Hidden field data) This field is required.</li></ul>\n<p> ' '<input type="hidden" name="data" id="id_data"></p>' ) self.assertHTMLEqual( f.as_table(), '<tr><td colspan="2"><ul class="errorlist nonfield">' '<li>(Hidden field data) This field is required.</li></ul>' '<input type="hidden" name="data" id="id_data"></td></tr>' ) def test_field_named_data(self): class DataForm(Form): data = CharField(max_length=10) f = DataForm({'data': 'xyzzy'}) self.assertTrue(f.is_valid()) self.assertEqual(f.cleaned_data, {'data': 'xyzzy'}) def test_empty_data_files_multi_value_dict(self): p = Person() self.assertIsInstance(p.data, MultiValueDict) self.assertIsInstance(p.files, MultiValueDict) def test_field_deep_copy_error_messages(self): class CustomCharField(CharField): def __init__(self, **kwargs): kwargs['error_messages'] = {'invalid': 'Form custom error message.'} super().__init__(**kwargs) field = CustomCharField() field_copy = copy.deepcopy(field) self.assertIsInstance(field_copy, CustomCharField) self.assertIsNot(field_copy.error_messages, field.error_messages) class CustomRenderer(DjangoTemplates): pass class RendererTests(SimpleTestCase): def test_default(self): form = Form() self.assertEqual(form.renderer, get_default_renderer()) def test_kwarg_instance(self): custom = CustomRenderer() form = Form(renderer=custom) self.assertEqual(form.renderer, custom) def test_kwarg_class(self): custom = CustomRenderer() form = Form(renderer=custom) self.assertEqual(form.renderer, custom) def test_attribute_instance(self): class CustomForm(Form): default_renderer = DjangoTemplates() form = CustomForm() self.assertEqual(form.renderer, CustomForm.default_renderer) def test_attribute_class(self): class CustomForm(Form): default_renderer = CustomRenderer form = CustomForm() self.assertIsInstance(form.renderer, CustomForm.default_renderer) def test_attribute_override(self): class CustomForm(Form): default_renderer = DjangoTemplates() custom = CustomRenderer() form = CustomForm(renderer=custom) self.assertEqual(form.renderer, custom)
7621f338817f25f1ed5a66add95dd53d1a0bb304c16823614f80148123c11bbf
import importlib import inspect import os import re import sys import tempfile import threading from io import StringIO from pathlib import Path from unittest import mock from django.core import mail from django.core.files.uploadedfile import SimpleUploadedFile from django.db import DatabaseError, connection from django.http import Http404 from django.shortcuts import render from django.template import TemplateDoesNotExist from django.test import RequestFactory, SimpleTestCase, override_settings from django.test.utils import LoggingCaptureMixin from django.urls import path, reverse from django.urls.converters import IntConverter from django.utils.functional import SimpleLazyObject from django.utils.regex_helper import _lazy_re_compile from django.utils.safestring import mark_safe from django.views.debug import ( CallableSettingWrapper, ExceptionReporter, Path as DebugPath, SafeExceptionReporterFilter, default_urlconf, get_default_exception_reporter_filter, technical_404_response, technical_500_response, ) from django.views.decorators.debug import ( sensitive_post_parameters, sensitive_variables, ) from ..views import ( custom_exception_reporter_filter_view, index_page, multivalue_dict_key_error, non_sensitive_view, paranoid_view, sensitive_args_function_caller, sensitive_kwargs_function_caller, sensitive_method_view, sensitive_view, ) class User: def __str__(self): return 'jacob' class WithoutEmptyPathUrls: urlpatterns = [path('url/', index_page, name='url')] class CallableSettingWrapperTests(SimpleTestCase): """ Unittests for CallableSettingWrapper """ def test_repr(self): class WrappedCallable: def __repr__(self): return "repr from the wrapped callable" def __call__(self): pass actual = repr(CallableSettingWrapper(WrappedCallable())) self.assertEqual(actual, "repr from the wrapped callable") @override_settings(DEBUG=True, ROOT_URLCONF='view_tests.urls') class DebugViewTests(SimpleTestCase): def test_files(self): with self.assertLogs('django.request', 'ERROR'): response = self.client.get('/raises/') self.assertEqual(response.status_code, 500) data = { 'file_data.txt': SimpleUploadedFile('file_data.txt', b'haha'), } with self.assertLogs('django.request', 'ERROR'): response = self.client.post('/raises/', data) self.assertContains(response, 'file_data.txt', status_code=500) self.assertNotContains(response, 'haha', status_code=500) def test_400(self): # When DEBUG=True, technical_500_template() is called. with self.assertLogs('django.security', 'WARNING'): response = self.client.get('/raises400/') self.assertContains(response, '<div class="context" id="', status_code=400) # Ensure no 403.html template exists to test the default case. @override_settings(TEMPLATES=[{ 'BACKEND': 'django.template.backends.django.DjangoTemplates', }]) def test_403(self): response = self.client.get('/raises403/') self.assertContains(response, '<h1>403 Forbidden</h1>', status_code=403) # Set up a test 403.html template. 
    @override_settings(TEMPLATES=[{
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'OPTIONS': {
            'loaders': [
                ('django.template.loaders.locmem.Loader', {
                    '403.html': 'This is a test template for a 403 error ({{ exception }}).',
                }),
            ],
        },
    }])
    def test_403_template(self):
        response = self.client.get('/raises403/')
        self.assertContains(response, 'test template', status_code=403)
        self.assertContains(response, '(Insufficient Permissions).', status_code=403)

    def test_404(self):
        response = self.client.get('/raises404/')
        self.assertEqual(response.status_code, 404)
        self.assertContains(response, "<code>not-in-urls</code>, didn't match", status_code=404)

    def test_404_not_in_urls(self):
        response = self.client.get('/not-in-urls')
        self.assertNotContains(response, "Raised by:", status_code=404)
        self.assertContains(response, "Django tried these URL patterns", status_code=404)
        self.assertContains(response, "<code>not-in-urls</code>, didn't match", status_code=404)
        # Pattern and view name of a RegexURLPattern appear.
        self.assertContains(response, r"^regex-post/(?P&lt;pk&gt;[0-9]+)/$", status_code=404)
        self.assertContains(response, "[name='regex-post']", status_code=404)
        # Pattern and view name of a RoutePattern appear.
        self.assertContains(response, r"path-post/&lt;int:pk&gt;/", status_code=404)
        self.assertContains(response, "[name='path-post']", status_code=404)

    @override_settings(ROOT_URLCONF=WithoutEmptyPathUrls)
    def test_404_empty_path_not_in_urls(self):
        response = self.client.get('/')
        self.assertContains(response, "The empty path didn't match any of these.", status_code=404)

    def test_technical_404(self):
        response = self.client.get('/technical404/')
        self.assertContains(response, "Raised by:", status_code=404)
        self.assertContains(response, "view_tests.views.technical404", status_code=404)

    def test_classbased_technical_404(self):
        response = self.client.get('/classbased404/')
        self.assertContains(response, "Raised by:", status_code=404)
        self.assertContains(response, "view_tests.views.Http404View", status_code=404)

    def test_non_l10ned_numeric_ids(self):
        """
        Numeric IDs and line numbers in fancy traceback context blocks
        shouldn't be localized.
        """
        with self.settings(DEBUG=True, USE_L10N=True):
            with self.assertLogs('django.request', 'ERROR'):
                response = self.client.get('/raises500/')
            # We look for an HTML fragment of the form
            # '<div class="context" id="c38123208">', not '<div class="context" id="c38,123,208"'
            self.assertContains(response, '<div class="context" id="', status_code=500)
            match = re.search(b'<div class="context" id="(?P<id>[^"]+)">', response.content)
            self.assertIsNotNone(match)
            id_repr = match.group('id')
            self.assertFalse(
                re.search(b'[^c0-9]', id_repr),
                "Numeric IDs in debug response HTML page shouldn't be localized (value: %s)."
                % id_repr.decode()
            )

    def test_template_exceptions(self):
        with self.assertLogs('django.request', 'ERROR'):
            try:
                self.client.get(reverse('template_exception'))
            except Exception:
                raising_loc = inspect.trace()[-1][-2][0].strip()
                self.assertNotEqual(
                    raising_loc.find("raise Exception('boom')"), -1,
                    "Failed to find 'raise Exception' in last frame of "
                    "traceback, instead found: %s" % raising_loc
                )

    def test_template_loader_postmortem(self):
        """Tests for a template file that doesn't exist."""
        template_name = "notfound.html"
        with tempfile.NamedTemporaryFile(prefix=template_name) as tmpfile:
            tempdir = os.path.dirname(tmpfile.name)
            template_path = os.path.join(tempdir, template_name)
            with override_settings(TEMPLATES=[{
                'BACKEND': 'django.template.backends.django.DjangoTemplates',
                'DIRS': [tempdir],
            }]), self.assertLogs('django.request', 'ERROR'):
                response = self.client.get(reverse('raises_template_does_not_exist', kwargs={"path": template_name}))
            self.assertContains(response, "%s (Source does not exist)" % template_path, status_code=500, count=2)
            # Assert as HTML.
            self.assertContains(
                response,
                '<li><code>django.template.loaders.filesystem.Loader</code>: '
                '%s (Source does not exist)</li>' % os.path.join(tempdir, 'notfound.html'),
                status_code=500,
                html=True,
            )

    def test_no_template_source_loaders(self):
        """
        Make sure if you don't specify a template, the debug view doesn't
        blow up.
        """
        with self.assertLogs('django.request', 'ERROR'):
            with self.assertRaises(TemplateDoesNotExist):
                self.client.get('/render_no_template/')

    @override_settings(ROOT_URLCONF='view_tests.default_urls')
    def test_default_urlconf_template(self):
        """
        Make sure that the default URLconf template is shown instead of the
        technical 404 page, if the user has not altered their URLconf yet.
        """
        response = self.client.get('/')
        self.assertContains(
            response,
            "<h2>The install worked successfully! Congratulations!</h2>"
        )

    @override_settings(ROOT_URLCONF='view_tests.regression_21530_urls')
    def test_regression_21530(self):
        """
        Regression test for bug #21530.

        If the admin app include is replaced with exactly one url pattern,
        then the technical 404 template should be displayed. The bug here was
        that an AttributeError caused a 500 response.
        """
        response = self.client.get('/')
        self.assertContains(
            response,
            "Page not found <span>(404)</span>",
            status_code=404
        )

    def test_template_encoding(self):
        """
        The templates are loaded directly, not via a template loader, and
        should be opened as utf-8 charset as is the default specified on
        template engines.
        """
        with mock.patch.object(DebugPath, 'open') as m:
            default_urlconf(None)
            m.assert_called_once_with(encoding='utf-8')
            m.reset_mock()
            technical_404_response(mock.MagicMock(), mock.Mock())
            m.assert_called_once_with(encoding='utf-8')

    def test_technical_404_converter_raise_404(self):
        with mock.patch.object(IntConverter, 'to_python', side_effect=Http404):
            response = self.client.get('/path-post/1/')
            self.assertContains(response, 'Page not found', status_code=404)


class DebugViewQueriesAllowedTests(SimpleTestCase):
    # May need a query to initialize MySQL connection
    databases = {'default'}

    def test_handle_db_exception(self):
        """
        Ensure the debug view works when a database exception is raised by
        performing an invalid query and passing the exception to the debug
        view.
""" with connection.cursor() as cursor: try: cursor.execute('INVALID SQL') except DatabaseError: exc_info = sys.exc_info() rf = RequestFactory() response = technical_500_response(rf.get('/'), *exc_info) self.assertContains(response, 'OperationalError at /', status_code=500) @override_settings( DEBUG=True, ROOT_URLCONF='view_tests.urls', # No template directories are configured, so no templates will be found. TEMPLATES=[{ 'BACKEND': 'django.template.backends.dummy.TemplateStrings', }], ) class NonDjangoTemplatesDebugViewTests(SimpleTestCase): def test_400(self): # When DEBUG=True, technical_500_template() is called. with self.assertLogs('django.security', 'WARNING'): response = self.client.get('/raises400/') self.assertContains(response, '<div class="context" id="', status_code=400) def test_403(self): response = self.client.get('/raises403/') self.assertContains(response, '<h1>403 Forbidden</h1>', status_code=403) def test_404(self): response = self.client.get('/raises404/') self.assertEqual(response.status_code, 404) def test_template_not_found_error(self): # Raises a TemplateDoesNotExist exception and shows the debug view. url = reverse('raises_template_does_not_exist', kwargs={"path": "notfound.html"}) with self.assertLogs('django.request', 'ERROR'): response = self.client.get(url) self.assertContains(response, '<div class="context" id="', status_code=500) class ExceptionReporterTests(SimpleTestCase): rf = RequestFactory() def test_request_and_exception(self): "A simple exception report can be generated" try: request = self.rf.get('/test_view/') request.user = User() raise ValueError("Can't find my keys") except ValueError: exc_type, exc_value, tb = sys.exc_info() reporter = ExceptionReporter(request, exc_type, exc_value, tb) html = reporter.get_traceback_html() self.assertInHTML('<h1>ValueError at /test_view/</h1>', html) self.assertIn('<pre class="exception_value">Can&#x27;t find my keys</pre>', html) self.assertIn('<th>Request Method:</th>', html) self.assertIn('<th>Request URL:</th>', html) self.assertIn('<h3 id="user-info">USER</h3>', html) self.assertIn('<p>jacob</p>', html) self.assertIn('<th>Exception Type:</th>', html) self.assertIn('<th>Exception Value:</th>', html) self.assertIn('<h2>Traceback ', html) self.assertIn('<h2>Request information</h2>', html) self.assertNotIn('<p>Request data not supplied</p>', html) self.assertIn('<p>No POST data</p>', html) def test_no_request(self): "An exception report can be generated without request" try: raise ValueError("Can't find my keys") except ValueError: exc_type, exc_value, tb = sys.exc_info() reporter = ExceptionReporter(None, exc_type, exc_value, tb) html = reporter.get_traceback_html() self.assertInHTML('<h1>ValueError</h1>', html) self.assertIn('<pre class="exception_value">Can&#x27;t find my keys</pre>', html) self.assertNotIn('<th>Request Method:</th>', html) self.assertNotIn('<th>Request URL:</th>', html) self.assertNotIn('<h3 id="user-info">USER</h3>', html) self.assertIn('<th>Exception Type:</th>', html) self.assertIn('<th>Exception Value:</th>', html) self.assertIn('<h2>Traceback ', html) self.assertIn('<h2>Request information</h2>', html) self.assertIn('<p>Request data not supplied</p>', html) def test_eol_support(self): """The ExceptionReporter supports Unix, Windows and Macintosh EOL markers""" LINES = ['print %d' % i for i in range(1, 6)] reporter = ExceptionReporter(None, None, None, None) for newline in ['\n', '\r\n', '\r']: fd, filename = tempfile.mkstemp(text=False) os.write(fd, (newline.join(LINES) + 
newline).encode()) os.close(fd) try: self.assertEqual( reporter._get_lines_from_file(filename, 3, 2), (1, LINES[1:3], LINES[3], LINES[4:]) ) finally: os.unlink(filename) def test_no_exception(self): "An exception report can be generated for just a request" request = self.rf.get('/test_view/') reporter = ExceptionReporter(request, None, None, None) html = reporter.get_traceback_html() self.assertInHTML('<h1>Report at /test_view/</h1>', html) self.assertIn('<pre class="exception_value">No exception message supplied</pre>', html) self.assertIn('<th>Request Method:</th>', html) self.assertIn('<th>Request URL:</th>', html) self.assertNotIn('<th>Exception Type:</th>', html) self.assertNotIn('<th>Exception Value:</th>', html) self.assertNotIn('<h2>Traceback ', html) self.assertIn('<h2>Request information</h2>', html) self.assertNotIn('<p>Request data not supplied</p>', html) def test_reporting_of_nested_exceptions(self): request = self.rf.get('/test_view/') try: try: raise AttributeError(mark_safe('<p>Top level</p>')) except AttributeError as explicit: try: raise ValueError(mark_safe('<p>Second exception</p>')) from explicit except ValueError: raise IndexError(mark_safe('<p>Final exception</p>')) except Exception: # Custom exception handler, just pass it into ExceptionReporter exc_type, exc_value, tb = sys.exc_info() explicit_exc = 'The above exception ({0}) was the direct cause of the following exception:' implicit_exc = 'During handling of the above exception ({0}), another exception occurred:' reporter = ExceptionReporter(request, exc_type, exc_value, tb) html = reporter.get_traceback_html() # Both messages are twice on page -- one rendered as html, # one as plain text (for pastebin) self.assertEqual(2, html.count(explicit_exc.format('&lt;p&gt;Top level&lt;/p&gt;'))) self.assertEqual(2, html.count(implicit_exc.format('&lt;p&gt;Second exception&lt;/p&gt;'))) self.assertEqual(10, html.count('&lt;p&gt;Final exception&lt;/p&gt;')) text = reporter.get_traceback_text() self.assertIn(explicit_exc.format('<p>Top level</p>'), text) self.assertIn(implicit_exc.format('<p>Second exception</p>'), text) self.assertEqual(3, text.count('<p>Final exception</p>')) def test_reporting_frames_without_source(self): try: source = "def funcName():\n raise Error('Whoops')\nfuncName()" namespace = {} code = compile(source, 'generated', 'exec') exec(code, namespace) except Exception: exc_type, exc_value, tb = sys.exc_info() request = self.rf.get('/test_view/') reporter = ExceptionReporter(request, exc_type, exc_value, tb) frames = reporter.get_traceback_frames() last_frame = frames[-1] self.assertEqual(last_frame['context_line'], '<source code not available>') self.assertEqual(last_frame['filename'], 'generated') self.assertEqual(last_frame['function'], 'funcName') self.assertEqual(last_frame['lineno'], 2) html = reporter.get_traceback_html() self.assertIn('generated in funcName, line 2', html) self.assertIn( '"generated", line 2, in funcName\n' ' &lt;source code not available&gt;', html, ) text = reporter.get_traceback_text() self.assertIn( '"generated", line 2, in funcName\n' ' <source code not available>', text, ) def test_reporting_frames_source_not_match(self): try: source = "def funcName():\n raise Error('Whoops')\nfuncName()" namespace = {} code = compile(source, 'generated', 'exec') exec(code, namespace) except Exception: exc_type, exc_value, tb = sys.exc_info() with mock.patch( 'django.views.debug.ExceptionReporter._get_source', return_value=['wrong source'], ): request = self.rf.get('/test_view/') reporter = 
ExceptionReporter(request, exc_type, exc_value, tb) frames = reporter.get_traceback_frames() last_frame = frames[-1] self.assertEqual(last_frame['context_line'], '<source code not available>') self.assertEqual(last_frame['filename'], 'generated') self.assertEqual(last_frame['function'], 'funcName') self.assertEqual(last_frame['lineno'], 2) html = reporter.get_traceback_html() self.assertIn('generated in funcName, line 2', html) self.assertIn( '"generated", line 2, in funcName\n' ' &lt;source code not available&gt;', html, ) text = reporter.get_traceback_text() self.assertIn( '"generated", line 2, in funcName\n' ' <source code not available>', text, ) def test_reporting_frames_for_cyclic_reference(self): try: def test_func(): try: raise RuntimeError('outer') from RuntimeError('inner') except RuntimeError as exc: raise exc.__cause__ test_func() except Exception: exc_type, exc_value, tb = sys.exc_info() request = self.rf.get('/test_view/') reporter = ExceptionReporter(request, exc_type, exc_value, tb) def generate_traceback_frames(*args, **kwargs): nonlocal tb_frames tb_frames = reporter.get_traceback_frames() tb_frames = None tb_generator = threading.Thread(target=generate_traceback_frames, daemon=True) tb_generator.start() tb_generator.join(timeout=5) if tb_generator.is_alive(): # tb_generator is a daemon that runs until the main thread/process # exits. This is resource heavy when running the full test suite. # Setting the following values to None makes # reporter.get_traceback_frames() exit early. exc_value.__traceback__ = exc_value.__context__ = exc_value.__cause__ = None tb_generator.join() self.fail('Cyclic reference in Exception Reporter.get_traceback_frames()') if tb_frames is None: # can happen if the thread generating traceback got killed # or exception while generating the traceback self.fail('Traceback generation failed') last_frame = tb_frames[-1] self.assertIn('raise exc.__cause__', last_frame['context_line']) self.assertEqual(last_frame['filename'], __file__) self.assertEqual(last_frame['function'], 'test_func') def test_request_and_message(self): "A message can be provided in addition to a request" request = self.rf.get('/test_view/') reporter = ExceptionReporter(request, None, "I'm a little teapot", None) html = reporter.get_traceback_html() self.assertInHTML('<h1>Report at /test_view/</h1>', html) self.assertIn('<pre class="exception_value">I&#x27;m a little teapot</pre>', html) self.assertIn('<th>Request Method:</th>', html) self.assertIn('<th>Request URL:</th>', html) self.assertNotIn('<th>Exception Type:</th>', html) self.assertNotIn('<th>Exception Value:</th>', html) self.assertNotIn('<h2>Traceback ', html) self.assertIn('<h2>Request information</h2>', html) self.assertNotIn('<p>Request data not supplied</p>', html) def test_message_only(self): reporter = ExceptionReporter(None, None, "I'm a little teapot", None) html = reporter.get_traceback_html() self.assertInHTML('<h1>Report</h1>', html) self.assertIn('<pre class="exception_value">I&#x27;m a little teapot</pre>', html) self.assertNotIn('<th>Request Method:</th>', html) self.assertNotIn('<th>Request URL:</th>', html) self.assertNotIn('<th>Exception Type:</th>', html) self.assertNotIn('<th>Exception Value:</th>', html) self.assertNotIn('<h2>Traceback ', html) self.assertIn('<h2>Request information</h2>', html) self.assertIn('<p>Request data not supplied</p>', html) def test_non_utf8_values_handling(self): "Non-UTF-8 exceptions/values should not make the output generation choke." 
try: class NonUtf8Output(Exception): def __repr__(self): return b'EXC\xe9EXC' somevar = b'VAL\xe9VAL' # NOQA raise NonUtf8Output() except Exception: exc_type, exc_value, tb = sys.exc_info() reporter = ExceptionReporter(None, exc_type, exc_value, tb) html = reporter.get_traceback_html() self.assertIn('VAL\\xe9VAL', html) self.assertIn('EXC\\xe9EXC', html) def test_local_variable_escaping(self): """Safe strings in local variables are escaped.""" try: local = mark_safe('<p>Local variable</p>') raise ValueError(local) except Exception: exc_type, exc_value, tb = sys.exc_info() html = ExceptionReporter(None, exc_type, exc_value, tb).get_traceback_html() self.assertIn('<td class="code"><pre>&#x27;&lt;p&gt;Local variable&lt;/p&gt;&#x27;</pre></td>', html) def test_unprintable_values_handling(self): "Unprintable values should not make the output generation choke." try: class OomOutput: def __repr__(self): raise MemoryError('OOM') oomvalue = OomOutput() # NOQA raise ValueError() except Exception: exc_type, exc_value, tb = sys.exc_info() reporter = ExceptionReporter(None, exc_type, exc_value, tb) html = reporter.get_traceback_html() self.assertIn('<td class="code"><pre>Error in formatting', html) def test_too_large_values_handling(self): "Large values should not create a large HTML." large = 256 * 1024 repr_of_str_adds = len(repr('')) try: class LargeOutput: def __repr__(self): return repr('A' * large) largevalue = LargeOutput() # NOQA raise ValueError() except Exception: exc_type, exc_value, tb = sys.exc_info() reporter = ExceptionReporter(None, exc_type, exc_value, tb) html = reporter.get_traceback_html() self.assertEqual(len(html) // 1024 // 128, 0) # still fit in 128Kb self.assertIn('&lt;trimmed %d bytes string&gt;' % (large + repr_of_str_adds,), html) def test_encoding_error(self): """ A UnicodeError displays a portion of the problematic string. HTML in safe strings is escaped. """ try: mark_safe('abcdefghijkl<p>mnὀp</p>qrstuwxyz').encode('ascii') except Exception: exc_type, exc_value, tb = sys.exc_info() reporter = ExceptionReporter(None, exc_type, exc_value, tb) html = reporter.get_traceback_html() self.assertIn('<h2>Unicode error hint</h2>', html) self.assertIn('The string that could not be encoded/decoded was: ', html) self.assertIn('<strong>&lt;p&gt;mnὀp&lt;/p&gt;</strong>', html) def test_unfrozen_importlib(self): """ importlib is not a frozen app, but its loader thinks it's frozen which results in an ImportError. Refs #21443. """ try: request = self.rf.get('/test_view/') importlib.import_module('abc.def.invalid.name') except Exception: exc_type, exc_value, tb = sys.exc_info() reporter = ExceptionReporter(request, exc_type, exc_value, tb) html = reporter.get_traceback_html() self.assertInHTML('<h1>ModuleNotFoundError at /test_view/</h1>', html) def test_ignore_traceback_evaluation_exceptions(self): """ Don't trip over exceptions generated by crafted objects when evaluating them while cleansing (#24455). """ class BrokenEvaluation(Exception): pass def broken_setup(): raise BrokenEvaluation request = self.rf.get('/test_view/') broken_lazy = SimpleLazyObject(broken_setup) try: bool(broken_lazy) except BrokenEvaluation: exc_type, exc_value, tb = sys.exc_info() self.assertIn( "BrokenEvaluation", ExceptionReporter(request, exc_type, exc_value, tb).get_traceback_html(), "Evaluation exception reason not mentioned in traceback" ) @override_settings(ALLOWED_HOSTS='example.com') def test_disallowed_host(self): "An exception report can be generated even for a disallowed host." 
request = self.rf.get('/', HTTP_HOST='evil.com') reporter = ExceptionReporter(request, None, None, None) html = reporter.get_traceback_html() self.assertIn("http://evil.com/", html) def test_request_with_items_key(self): """ An exception report can be generated for requests with 'items' in request GET, POST, FILES, or COOKIES QueryDicts. """ value = '<td>items</td><td class="code"><pre>&#x27;Oops&#x27;</pre></td>' # GET request = self.rf.get('/test_view/?items=Oops') reporter = ExceptionReporter(request, None, None, None) html = reporter.get_traceback_html() self.assertInHTML(value, html) # POST request = self.rf.post('/test_view/', data={'items': 'Oops'}) reporter = ExceptionReporter(request, None, None, None) html = reporter.get_traceback_html() self.assertInHTML(value, html) # FILES fp = StringIO('filecontent') request = self.rf.post('/test_view/', data={'name': 'filename', 'items': fp}) reporter = ExceptionReporter(request, None, None, None) html = reporter.get_traceback_html() self.assertInHTML( '<td>items</td><td class="code"><pre>&lt;InMemoryUploadedFile: ' 'items (application/octet-stream)&gt;</pre></td>', html ) # COOKIES rf = RequestFactory() rf.cookies['items'] = 'Oops' request = rf.get('/test_view/') reporter = ExceptionReporter(request, None, None, None) html = reporter.get_traceback_html() self.assertInHTML('<td>items</td><td class="code"><pre>&#x27;Oops&#x27;</pre></td>', html) def test_exception_fetching_user(self): """ The error page can be rendered if the current user can't be retrieved (such as when the database is unavailable). """ class ExceptionUser: def __str__(self): raise Exception() request = self.rf.get('/test_view/') request.user = ExceptionUser() try: raise ValueError('Oops') except ValueError: exc_type, exc_value, tb = sys.exc_info() reporter = ExceptionReporter(request, exc_type, exc_value, tb) html = reporter.get_traceback_html() self.assertInHTML('<h1>ValueError at /test_view/</h1>', html) self.assertIn('<pre class="exception_value">Oops</pre>', html) self.assertIn('<h3 id="user-info">USER</h3>', html) self.assertIn('<p>[unable to retrieve the current user]</p>', html) text = reporter.get_traceback_text() self.assertIn('USER: [unable to retrieve the current user]', text) def test_template_encoding(self): """ The templates are loaded directly, not via a template loader, and should be opened as utf-8 charset as is the default specified on template engines. 
""" reporter = ExceptionReporter(None, None, None, None) with mock.patch.object(DebugPath, 'open') as m: reporter.get_traceback_html() m.assert_called_once_with(encoding='utf-8') m.reset_mock() reporter.get_traceback_text() m.assert_called_once_with(encoding='utf-8') class PlainTextReportTests(SimpleTestCase): rf = RequestFactory() def test_request_and_exception(self): "A simple exception report can be generated" try: request = self.rf.get('/test_view/') request.user = User() raise ValueError("Can't find my keys") except ValueError: exc_type, exc_value, tb = sys.exc_info() reporter = ExceptionReporter(request, exc_type, exc_value, tb) text = reporter.get_traceback_text() self.assertIn('ValueError at /test_view/', text) self.assertIn("Can't find my keys", text) self.assertIn('Request Method:', text) self.assertIn('Request URL:', text) self.assertIn('USER: jacob', text) self.assertIn('Exception Type:', text) self.assertIn('Exception Value:', text) self.assertIn('Traceback (most recent call last):', text) self.assertIn('Request information:', text) self.assertNotIn('Request data not supplied', text) def test_no_request(self): "An exception report can be generated without request" try: raise ValueError("Can't find my keys") except ValueError: exc_type, exc_value, tb = sys.exc_info() reporter = ExceptionReporter(None, exc_type, exc_value, tb) text = reporter.get_traceback_text() self.assertIn('ValueError', text) self.assertIn("Can't find my keys", text) self.assertNotIn('Request Method:', text) self.assertNotIn('Request URL:', text) self.assertNotIn('USER:', text) self.assertIn('Exception Type:', text) self.assertIn('Exception Value:', text) self.assertIn('Traceback (most recent call last):', text) self.assertIn('Request data not supplied', text) def test_no_exception(self): "An exception report can be generated for just a request" request = self.rf.get('/test_view/') reporter = ExceptionReporter(request, None, None, None) reporter.get_traceback_text() def test_request_and_message(self): "A message can be provided in addition to a request" request = self.rf.get('/test_view/') reporter = ExceptionReporter(request, None, "I'm a little teapot", None) reporter.get_traceback_text() @override_settings(DEBUG=True) def test_template_exception(self): request = self.rf.get('/test_view/') try: render(request, 'debug/template_error.html') except Exception: exc_type, exc_value, tb = sys.exc_info() reporter = ExceptionReporter(request, exc_type, exc_value, tb) text = reporter.get_traceback_text() templ_path = Path(Path(__file__).parents[1], 'templates', 'debug', 'template_error.html') self.assertIn( 'Template error:\n' 'In template %(path)s, error at line 2\n' ' \'cycle\' tag requires at least two arguments\n' ' 1 : Template with error:\n' ' 2 : {%% cycle %%} \n' ' 3 : ' % {'path': templ_path}, text ) def test_request_with_items_key(self): """ An exception report can be generated for requests with 'items' in request GET, POST, FILES, or COOKIES QueryDicts. 
""" # GET request = self.rf.get('/test_view/?items=Oops') reporter = ExceptionReporter(request, None, None, None) text = reporter.get_traceback_text() self.assertIn("items = 'Oops'", text) # POST request = self.rf.post('/test_view/', data={'items': 'Oops'}) reporter = ExceptionReporter(request, None, None, None) text = reporter.get_traceback_text() self.assertIn("items = 'Oops'", text) # FILES fp = StringIO('filecontent') request = self.rf.post('/test_view/', data={'name': 'filename', 'items': fp}) reporter = ExceptionReporter(request, None, None, None) text = reporter.get_traceback_text() self.assertIn('items = <InMemoryUploadedFile:', text) # COOKIES rf = RequestFactory() rf.cookies['items'] = 'Oops' request = rf.get('/test_view/') reporter = ExceptionReporter(request, None, None, None) text = reporter.get_traceback_text() self.assertIn("items = 'Oops'", text) def test_message_only(self): reporter = ExceptionReporter(None, None, "I'm a little teapot", None) reporter.get_traceback_text() @override_settings(ALLOWED_HOSTS='example.com') def test_disallowed_host(self): "An exception report can be generated even for a disallowed host." request = self.rf.get('/', HTTP_HOST='evil.com') reporter = ExceptionReporter(request, None, None, None) text = reporter.get_traceback_text() self.assertIn("http://evil.com/", text) class ExceptionReportTestMixin: # Mixin used in the ExceptionReporterFilterTests and # AjaxResponseExceptionReporterFilter tests below breakfast_data = { 'sausage-key': 'sausage-value', 'baked-beans-key': 'baked-beans-value', 'hash-brown-key': 'hash-brown-value', 'bacon-key': 'bacon-value', } def verify_unsafe_response(self, view, check_for_vars=True, check_for_POST_params=True): """ Asserts that potentially sensitive info are displayed in the response. """ request = self.rf.post('/some_url/', self.breakfast_data) response = view(request) if check_for_vars: # All variables are shown. self.assertContains(response, 'cooked_eggs', status_code=500) self.assertContains(response, 'scrambled', status_code=500) self.assertContains(response, 'sauce', status_code=500) self.assertContains(response, 'worcestershire', status_code=500) if check_for_POST_params: for k, v in self.breakfast_data.items(): # All POST parameters are shown. self.assertContains(response, k, status_code=500) self.assertContains(response, v, status_code=500) def verify_safe_response(self, view, check_for_vars=True, check_for_POST_params=True): """ Asserts that certain sensitive info are not displayed in the response. """ request = self.rf.post('/some_url/', self.breakfast_data) response = view(request) if check_for_vars: # Non-sensitive variable's name and value are shown. self.assertContains(response, 'cooked_eggs', status_code=500) self.assertContains(response, 'scrambled', status_code=500) # Sensitive variable's name is shown but not its value. self.assertContains(response, 'sauce', status_code=500) self.assertNotContains(response, 'worcestershire', status_code=500) if check_for_POST_params: for k in self.breakfast_data: # All POST parameters' names are shown. self.assertContains(response, k, status_code=500) # Non-sensitive POST parameters' values are shown. self.assertContains(response, 'baked-beans-value', status_code=500) self.assertContains(response, 'hash-brown-value', status_code=500) # Sensitive POST parameters' values are not shown. 
            self.assertNotContains(response, 'sausage-value', status_code=500)
            self.assertNotContains(response, 'bacon-value', status_code=500)

    def verify_paranoid_response(self, view, check_for_vars=True,
                                 check_for_POST_params=True):
        """
        Asserts that no variables or POST parameters are displayed in the
        response.
        """
        request = self.rf.post('/some_url/', self.breakfast_data)
        response = view(request)
        if check_for_vars:
            # Show variable names but not their values.
            self.assertContains(response, 'cooked_eggs', status_code=500)
            self.assertNotContains(response, 'scrambled', status_code=500)
            self.assertContains(response, 'sauce', status_code=500)
            self.assertNotContains(response, 'worcestershire', status_code=500)
        if check_for_POST_params:
            for k, v in self.breakfast_data.items():
                # All POST parameters' names are shown.
                self.assertContains(response, k, status_code=500)
                # No POST parameters' values are shown.
                self.assertNotContains(response, v, status_code=500)

    def verify_unsafe_email(self, view, check_for_POST_params=True):
        """
        Asserts that potentially sensitive info is displayed in the email
        report.
        """
        with self.settings(ADMINS=[('Admin', '[email protected]')]):
            mail.outbox = []  # Empty outbox
            request = self.rf.post('/some_url/', self.breakfast_data)
            view(request)
            self.assertEqual(len(mail.outbox), 1)
            email = mail.outbox[0]

            # Frames vars are never shown in plain text email reports.
            body_plain = str(email.body)
            self.assertNotIn('cooked_eggs', body_plain)
            self.assertNotIn('scrambled', body_plain)
            self.assertNotIn('sauce', body_plain)
            self.assertNotIn('worcestershire', body_plain)

            # Frames vars are shown in html email reports.
            body_html = str(email.alternatives[0][0])
            self.assertIn('cooked_eggs', body_html)
            self.assertIn('scrambled', body_html)
            self.assertIn('sauce', body_html)
            self.assertIn('worcestershire', body_html)

            if check_for_POST_params:
                for k, v in self.breakfast_data.items():
                    # All POST parameters are shown.
                    self.assertIn(k, body_plain)
                    self.assertIn(v, body_plain)
                    self.assertIn(k, body_html)
                    self.assertIn(v, body_html)

    def verify_safe_email(self, view, check_for_POST_params=True):
        """
        Asserts that certain sensitive info is not displayed in the email
        report.
        """
        with self.settings(ADMINS=[('Admin', '[email protected]')]):
            mail.outbox = []  # Empty outbox
            request = self.rf.post('/some_url/', self.breakfast_data)
            view(request)
            self.assertEqual(len(mail.outbox), 1)
            email = mail.outbox[0]

            # Frames vars are never shown in plain text email reports.
            body_plain = str(email.body)
            self.assertNotIn('cooked_eggs', body_plain)
            self.assertNotIn('scrambled', body_plain)
            self.assertNotIn('sauce', body_plain)
            self.assertNotIn('worcestershire', body_plain)

            # Frames vars are shown in html email reports.
            body_html = str(email.alternatives[0][0])
            self.assertIn('cooked_eggs', body_html)
            self.assertIn('scrambled', body_html)
            self.assertIn('sauce', body_html)
            self.assertNotIn('worcestershire', body_html)

            if check_for_POST_params:
                for k in self.breakfast_data:
                    # All POST parameters' names are shown.
                    self.assertIn(k, body_plain)
                # Non-sensitive POST parameters' values are shown.
                self.assertIn('baked-beans-value', body_plain)
                self.assertIn('hash-brown-value', body_plain)
                self.assertIn('baked-beans-value', body_html)
                self.assertIn('hash-brown-value', body_html)
                # Sensitive POST parameters' values are not shown.
                self.assertNotIn('sausage-value', body_plain)
                self.assertNotIn('bacon-value', body_plain)
                self.assertNotIn('sausage-value', body_html)
                self.assertNotIn('bacon-value', body_html)

    def verify_paranoid_email(self, view):
        """
        Asserts that no variables or POST parameters are displayed in the
        email report.
        """
        with self.settings(ADMINS=[('Admin', '[email protected]')]):
            mail.outbox = []  # Empty outbox
            request = self.rf.post('/some_url/', self.breakfast_data)
            view(request)
            self.assertEqual(len(mail.outbox), 1)
            email = mail.outbox[0]
            # Frames vars are never shown in plain text email reports.
            body = str(email.body)
            self.assertNotIn('cooked_eggs', body)
            self.assertNotIn('scrambled', body)
            self.assertNotIn('sauce', body)
            self.assertNotIn('worcestershire', body)
            for k, v in self.breakfast_data.items():
                # All POST parameters' names are shown.
                self.assertIn(k, body)
                # No POST parameters' values are shown.
                self.assertNotIn(v, body)


@override_settings(ROOT_URLCONF='view_tests.urls')
class ExceptionReporterFilterTests(ExceptionReportTestMixin, LoggingCaptureMixin, SimpleTestCase):
    """
    Sensitive information can be filtered out of error reports (#14614).
    """
    rf = RequestFactory()

    def test_non_sensitive_request(self):
        """
        Everything (request info and frame variables) can be seen in the
        default error reports for non-sensitive requests.
        """
        with self.settings(DEBUG=True):
            self.verify_unsafe_response(non_sensitive_view)
            self.verify_unsafe_email(non_sensitive_view)

        with self.settings(DEBUG=False):
            self.verify_unsafe_response(non_sensitive_view)
            self.verify_unsafe_email(non_sensitive_view)

    def test_sensitive_request(self):
        """
        Sensitive POST parameters and frame variables cannot be seen in the
        default error reports for sensitive requests.
        """
        with self.settings(DEBUG=True):
            self.verify_unsafe_response(sensitive_view)
            self.verify_unsafe_email(sensitive_view)

        with self.settings(DEBUG=False):
            self.verify_safe_response(sensitive_view)
            self.verify_safe_email(sensitive_view)

    def test_paranoid_request(self):
        """
        No POST parameters and frame variables can be seen in the default
        error reports for "paranoid" requests.
        """
        with self.settings(DEBUG=True):
            self.verify_unsafe_response(paranoid_view)
            self.verify_unsafe_email(paranoid_view)

        with self.settings(DEBUG=False):
            self.verify_paranoid_response(paranoid_view)
            self.verify_paranoid_email(paranoid_view)

    def test_multivalue_dict_key_error(self):
        """
        #21098 -- Sensitive POST parameters cannot be seen in the error
        reports if request.POST['nonexistent_key'] throws an error.
        """
        with self.settings(DEBUG=True):
            self.verify_unsafe_response(multivalue_dict_key_error)
            self.verify_unsafe_email(multivalue_dict_key_error)

        with self.settings(DEBUG=False):
            self.verify_safe_response(multivalue_dict_key_error)
            self.verify_safe_email(multivalue_dict_key_error)

    def test_custom_exception_reporter_filter(self):
        """
        It's possible to assign an exception reporter filter to the request
        to bypass the one set in DEFAULT_EXCEPTION_REPORTER_FILTER.
        """
        with self.settings(DEBUG=True):
            self.verify_unsafe_response(custom_exception_reporter_filter_view)
            self.verify_unsafe_email(custom_exception_reporter_filter_view)

        with self.settings(DEBUG=False):
            self.verify_unsafe_response(custom_exception_reporter_filter_view)
            self.verify_unsafe_email(custom_exception_reporter_filter_view)

    def test_sensitive_method(self):
        """
        The sensitive_variables decorator works with object methods.
""" with self.settings(DEBUG=True): self.verify_unsafe_response(sensitive_method_view, check_for_POST_params=False) self.verify_unsafe_email(sensitive_method_view, check_for_POST_params=False) with self.settings(DEBUG=False): self.verify_safe_response(sensitive_method_view, check_for_POST_params=False) self.verify_safe_email(sensitive_method_view, check_for_POST_params=False) def test_sensitive_function_arguments(self): """ Sensitive variables don't leak in the sensitive_variables decorator's frame, when those variables are passed as arguments to the decorated function. """ with self.settings(DEBUG=True): self.verify_unsafe_response(sensitive_args_function_caller) self.verify_unsafe_email(sensitive_args_function_caller) with self.settings(DEBUG=False): self.verify_safe_response(sensitive_args_function_caller, check_for_POST_params=False) self.verify_safe_email(sensitive_args_function_caller, check_for_POST_params=False) def test_sensitive_function_keyword_arguments(self): """ Sensitive variables don't leak in the sensitive_variables decorator's frame, when those variables are passed as keyword arguments to the decorated function. """ with self.settings(DEBUG=True): self.verify_unsafe_response(sensitive_kwargs_function_caller) self.verify_unsafe_email(sensitive_kwargs_function_caller) with self.settings(DEBUG=False): self.verify_safe_response(sensitive_kwargs_function_caller, check_for_POST_params=False) self.verify_safe_email(sensitive_kwargs_function_caller, check_for_POST_params=False) def test_callable_settings(self): """ Callable settings should not be evaluated in the debug page (#21345). """ def callable_setting(): return "This should not be displayed" with self.settings(DEBUG=True, FOOBAR=callable_setting): response = self.client.get('/raises500/') self.assertNotContains(response, "This should not be displayed", status_code=500) def test_callable_settings_forbidding_to_set_attributes(self): """ Callable settings which forbid to set attributes should not break the debug page (#23070). """ class CallableSettingWithSlots: __slots__ = [] def __call__(self): return "This should not be displayed" with self.settings(DEBUG=True, WITH_SLOTS=CallableSettingWithSlots()): response = self.client.get('/raises500/') self.assertNotContains(response, "This should not be displayed", status_code=500) def test_dict_setting_with_non_str_key(self): """ A dict setting containing a non-string key should not break the debug page (#12744). """ with self.settings(DEBUG=True, FOOBAR={42: None}): response = self.client.get('/raises500/') self.assertContains(response, 'FOOBAR', status_code=500) def test_sensitive_settings(self): """ The debug page should not show some sensitive settings (password, secret key, ...). """ sensitive_settings = [ 'SECRET_KEY', 'PASSWORD', 'API_KEY', 'AUTH_TOKEN', ] for setting in sensitive_settings: with self.settings(DEBUG=True, **{setting: "should not be displayed"}): response = self.client.get('/raises500/') self.assertNotContains(response, 'should not be displayed', status_code=500) def test_settings_with_sensitive_keys(self): """ The debug page should filter out some sensitive information found in dict settings. 
""" sensitive_settings = [ 'SECRET_KEY', 'PASSWORD', 'API_KEY', 'AUTH_TOKEN', ] for setting in sensitive_settings: FOOBAR = { setting: "should not be displayed", 'recursive': {setting: "should not be displayed"}, } with self.settings(DEBUG=True, FOOBAR=FOOBAR): response = self.client.get('/raises500/') self.assertNotContains(response, 'should not be displayed', status_code=500) def test_cleanse_setting_basic(self): reporter_filter = SafeExceptionReporterFilter() self.assertEqual(reporter_filter.cleanse_setting('TEST', 'TEST'), 'TEST') self.assertEqual( reporter_filter.cleanse_setting('PASSWORD', 'super_secret'), reporter_filter.cleansed_substitute, ) def test_cleanse_setting_ignore_case(self): reporter_filter = SafeExceptionReporterFilter() self.assertEqual( reporter_filter.cleanse_setting('password', 'super_secret'), reporter_filter.cleansed_substitute, ) def test_cleanse_setting_recurses_in_dictionary(self): reporter_filter = SafeExceptionReporterFilter() initial = {'login': 'cooper', 'password': 'secret'} self.assertEqual( reporter_filter.cleanse_setting('SETTING_NAME', initial), {'login': 'cooper', 'password': reporter_filter.cleansed_substitute}, ) def test_request_meta_filtering(self): request = self.rf.get('/', HTTP_SECRET_HEADER='super_secret') reporter_filter = SafeExceptionReporterFilter() self.assertEqual( reporter_filter.get_safe_request_meta(request)['HTTP_SECRET_HEADER'], reporter_filter.cleansed_substitute, ) def test_exception_report_uses_meta_filtering(self): response = self.client.get('/raises500/', HTTP_SECRET_HEADER='super_secret') self.assertNotIn(b'super_secret', response.content) response = self.client.get( '/raises500/', HTTP_SECRET_HEADER='super_secret', HTTP_X_REQUESTED_WITH='XMLHttpRequest', ) self.assertNotIn(b'super_secret', response.content) class CustomExceptionReporterFilter(SafeExceptionReporterFilter): cleansed_substitute = 'XXXXXXXXXXXXXXXXXXXX' hidden_settings = _lazy_re_compile('API|TOKEN|KEY|SECRET|PASS|SIGNATURE|DATABASE_URL', flags=re.I) @override_settings( ROOT_URLCONF='view_tests.urls', DEFAULT_EXCEPTION_REPORTER_FILTER='%s.CustomExceptionReporterFilter' % __name__, ) class CustomExceptionReporterFilterTests(SimpleTestCase): def setUp(self): get_default_exception_reporter_filter.cache_clear() def tearDown(self): get_default_exception_reporter_filter.cache_clear() def test_setting_allows_custom_subclass(self): self.assertIsInstance( get_default_exception_reporter_filter(), CustomExceptionReporterFilter, ) def test_cleansed_substitute_override(self): reporter_filter = get_default_exception_reporter_filter() self.assertEqual( reporter_filter.cleanse_setting('password', 'super_secret'), reporter_filter.cleansed_substitute, ) def test_hidden_settings_override(self): reporter_filter = get_default_exception_reporter_filter() self.assertEqual( reporter_filter.cleanse_setting('database_url', 'super_secret'), reporter_filter.cleansed_substitute, ) class AjaxResponseExceptionReporterFilter(ExceptionReportTestMixin, LoggingCaptureMixin, SimpleTestCase): """ Sensitive information can be filtered out of error reports. Here we specifically test the plain text 500 debug-only error page served when it has been detected the request was sent by JS code. We don't check for (non)existence of frames vars in the traceback information section of the response content because we don't include them in these error pages. Refs #14614. 
""" rf = RequestFactory(HTTP_X_REQUESTED_WITH='XMLHttpRequest') def test_non_sensitive_request(self): """ Request info can bee seen in the default error reports for non-sensitive requests. """ with self.settings(DEBUG=True): self.verify_unsafe_response(non_sensitive_view, check_for_vars=False) with self.settings(DEBUG=False): self.verify_unsafe_response(non_sensitive_view, check_for_vars=False) def test_sensitive_request(self): """ Sensitive POST parameters cannot be seen in the default error reports for sensitive requests. """ with self.settings(DEBUG=True): self.verify_unsafe_response(sensitive_view, check_for_vars=False) with self.settings(DEBUG=False): self.verify_safe_response(sensitive_view, check_for_vars=False) def test_paranoid_request(self): """ No POST parameters can be seen in the default error reports for "paranoid" requests. """ with self.settings(DEBUG=True): self.verify_unsafe_response(paranoid_view, check_for_vars=False) with self.settings(DEBUG=False): self.verify_paranoid_response(paranoid_view, check_for_vars=False) def test_custom_exception_reporter_filter(self): """ It's possible to assign an exception reporter filter to the request to bypass the one set in DEFAULT_EXCEPTION_REPORTER_FILTER. """ with self.settings(DEBUG=True): self.verify_unsafe_response(custom_exception_reporter_filter_view, check_for_vars=False) with self.settings(DEBUG=False): self.verify_unsafe_response(custom_exception_reporter_filter_view, check_for_vars=False) @override_settings(DEBUG=True, ROOT_URLCONF='view_tests.urls') def test_ajax_response_encoding(self): response = self.client.get('/raises500/', HTTP_X_REQUESTED_WITH='XMLHttpRequest') self.assertEqual(response['Content-Type'], 'text/plain; charset=utf-8') class DecoratorsTests(SimpleTestCase): def test_sensitive_variables_not_called(self): msg = ( 'sensitive_variables() must be called to use it as a decorator, ' 'e.g., use @sensitive_variables(), not @sensitive_variables.' ) with self.assertRaisesMessage(TypeError, msg): @sensitive_variables def test_func(password): pass def test_sensitive_post_parameters_not_called(self): msg = ( 'sensitive_post_parameters() must be called to use it as a ' 'decorator, e.g., use @sensitive_post_parameters(), not ' '@sensitive_post_parameters.' ) with self.assertRaisesMessage(TypeError, msg): @sensitive_post_parameters def test_func(request): return index_page(request)
2266f441976aa3b1e0bffac224ed7cf1647dcdb1bf0cc1146a2c48cb00039a11
import operator from django.template import Engine, Library engine = Engine(app_dirs=True) register = Library() @register.inclusion_tag('inclusion.html') def inclusion_no_params(): """Expected inclusion_no_params __doc__""" return {"result": "inclusion_no_params - Expected result"} inclusion_no_params.anything = "Expected inclusion_no_params __dict__" @register.inclusion_tag(engine.get_template('inclusion.html')) def inclusion_no_params_from_template(): """Expected inclusion_no_params_from_template __doc__""" return {"result": "inclusion_no_params_from_template - Expected result"} inclusion_no_params_from_template.anything = "Expected inclusion_no_params_from_template __dict__" @register.inclusion_tag('inclusion.html') def inclusion_one_param(arg): """Expected inclusion_one_param __doc__""" return {"result": "inclusion_one_param - Expected result: %s" % arg} inclusion_one_param.anything = "Expected inclusion_one_param __dict__" @register.inclusion_tag(engine.get_template('inclusion.html')) def inclusion_one_param_from_template(arg): """Expected inclusion_one_param_from_template __doc__""" return {"result": "inclusion_one_param_from_template - Expected result: %s" % arg} inclusion_one_param_from_template.anything = "Expected inclusion_one_param_from_template __dict__" @register.inclusion_tag('inclusion.html', takes_context=False) def inclusion_explicit_no_context(arg): """Expected inclusion_explicit_no_context __doc__""" return {"result": "inclusion_explicit_no_context - Expected result: %s" % arg} inclusion_explicit_no_context.anything = "Expected inclusion_explicit_no_context __dict__" @register.inclusion_tag(engine.get_template('inclusion.html'), takes_context=False) def inclusion_explicit_no_context_from_template(arg): """Expected inclusion_explicit_no_context_from_template __doc__""" return {"result": "inclusion_explicit_no_context_from_template - Expected result: %s" % arg} inclusion_explicit_no_context_from_template.anything = "Expected inclusion_explicit_no_context_from_template __dict__" @register.inclusion_tag('inclusion.html', takes_context=True) def inclusion_no_params_with_context(context): """Expected inclusion_no_params_with_context __doc__""" return {"result": "inclusion_no_params_with_context - Expected result (context value: %s)" % context['value']} inclusion_no_params_with_context.anything = "Expected inclusion_no_params_with_context __dict__" @register.inclusion_tag(engine.get_template('inclusion.html'), takes_context=True) def inclusion_no_params_with_context_from_template(context): """Expected inclusion_no_params_with_context_from_template __doc__""" return { "result": ( "inclusion_no_params_with_context_from_template - Expected result (context value: %s)" % context['value'] ) } inclusion_no_params_with_context_from_template.anything = ( "Expected inclusion_no_params_with_context_from_template __dict__" ) @register.inclusion_tag('inclusion.html', takes_context=True) def inclusion_params_and_context(context, arg): """Expected inclusion_params_and_context __doc__""" return { "result": "inclusion_params_and_context - Expected result (context value: %s): %s" % (context['value'], arg) } inclusion_params_and_context.anything = "Expected inclusion_params_and_context __dict__" @register.inclusion_tag(engine.get_template('inclusion.html'), takes_context=True) def inclusion_params_and_context_from_template(context, arg): """Expected inclusion_params_and_context_from_template __doc__""" return { "result": ( "inclusion_params_and_context_from_template - Expected result " "(context 
value: %s): %s" % (context['value'], arg) ) } inclusion_params_and_context_from_template.anything = "Expected inclusion_params_and_context_from_template __dict__" @register.inclusion_tag('inclusion.html') def inclusion_two_params(one, two): """Expected inclusion_two_params __doc__""" return {"result": "inclusion_two_params - Expected result: %s, %s" % (one, two)} inclusion_two_params.anything = "Expected inclusion_two_params __dict__" @register.inclusion_tag(engine.get_template('inclusion.html')) def inclusion_two_params_from_template(one, two): """Expected inclusion_two_params_from_template __doc__""" return {"result": "inclusion_two_params_from_template - Expected result: %s, %s" % (one, two)} inclusion_two_params_from_template.anything = "Expected inclusion_two_params_from_template __dict__" @register.inclusion_tag('inclusion.html') def inclusion_one_default(one, two='hi'): """Expected inclusion_one_default __doc__""" return {"result": "inclusion_one_default - Expected result: %s, %s" % (one, two)} inclusion_one_default.anything = "Expected inclusion_one_default __dict__" @register.inclusion_tag('inclusion.html') def inclusion_keyword_only_default(*, kwarg=42): return { 'result': ( 'inclusion_keyword_only_default - Expected result: %s' % kwarg ), } @register.inclusion_tag(engine.get_template('inclusion.html')) def inclusion_one_default_from_template(one, two='hi'): """Expected inclusion_one_default_from_template __doc__""" return {"result": "inclusion_one_default_from_template - Expected result: %s, %s" % (one, two)} inclusion_one_default_from_template.anything = "Expected inclusion_one_default_from_template __dict__" @register.inclusion_tag('inclusion.html') def inclusion_unlimited_args(one, two='hi', *args): """Expected inclusion_unlimited_args __doc__""" return { "result": ( "inclusion_unlimited_args - Expected result: %s" % ( ', '.join(str(arg) for arg in [one, two, *args]) ) ) } inclusion_unlimited_args.anything = "Expected inclusion_unlimited_args __dict__" @register.inclusion_tag(engine.get_template('inclusion.html')) def inclusion_unlimited_args_from_template(one, two='hi', *args): """Expected inclusion_unlimited_args_from_template __doc__""" return { "result": ( "inclusion_unlimited_args_from_template - Expected result: %s" % ( ', '.join(str(arg) for arg in [one, two, *args]) ) ) } inclusion_unlimited_args_from_template.anything = "Expected inclusion_unlimited_args_from_template __dict__" @register.inclusion_tag('inclusion.html') def inclusion_only_unlimited_args(*args): """Expected inclusion_only_unlimited_args __doc__""" return { "result": "inclusion_only_unlimited_args - Expected result: %s" % ( ', '.join(str(arg) for arg in args) ) } inclusion_only_unlimited_args.anything = "Expected inclusion_only_unlimited_args __dict__" @register.inclusion_tag(engine.get_template('inclusion.html')) def inclusion_only_unlimited_args_from_template(*args): """Expected inclusion_only_unlimited_args_from_template __doc__""" return { "result": "inclusion_only_unlimited_args_from_template - Expected result: %s" % ( ', '.join(str(arg) for arg in args) ) } inclusion_only_unlimited_args_from_template.anything = "Expected inclusion_only_unlimited_args_from_template __dict__" @register.inclusion_tag('test_incl_tag_use_l10n.html', takes_context=True) def inclusion_tag_use_l10n(context): """Expected inclusion_tag_use_l10n __doc__""" return {} inclusion_tag_use_l10n.anything = "Expected inclusion_tag_use_l10n __dict__" @register.inclusion_tag('inclusion.html') def inclusion_unlimited_args_kwargs(one, 
two='hi', *args, **kwargs): """Expected inclusion_unlimited_args_kwargs __doc__""" # Sort the dictionary by key to guarantee the order for testing. sorted_kwarg = sorted(kwargs.items(), key=operator.itemgetter(0)) return {"result": "inclusion_unlimited_args_kwargs - Expected result: %s / %s" % ( ', '.join(str(arg) for arg in [one, two, *args]), ', '.join('%s=%s' % (k, v) for (k, v) in sorted_kwarg) )} inclusion_unlimited_args_kwargs.anything = "Expected inclusion_unlimited_args_kwargs __dict__" @register.inclusion_tag('inclusion.html', takes_context=True) def inclusion_tag_without_context_parameter(arg): """Expected inclusion_tag_without_context_parameter __doc__""" return {} inclusion_tag_without_context_parameter.anything = "Expected inclusion_tag_without_context_parameter __dict__" @register.inclusion_tag('inclusion_extends1.html') def inclusion_extends1(): return {} @register.inclusion_tag('inclusion_extends2.html') def inclusion_extends2(): return {}
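
# A minimal usage sketch (illustrative only): rendering one of the tags
# defined above through a throwaway engine. The library label 'inclusion'
# is an assumption standing in for whatever name this module is registered
# under in a real setup.
def _inclusion_tag_usage_sketch():
    from django.template import Context

    sketch_engine = Engine(
        app_dirs=True,  # lets 'inclusion.html' be found, as for `engine` above
        libraries={'inclusion': __name__},
    )
    template = sketch_engine.from_string(
        "{% load inclusion %}{% inclusion_one_param 'hi' %}"
    )
    # inclusion.html is rendered with
    # result="inclusion_one_param - Expected result: hi".
    return template.render(Context())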
49165a4be478c559fee59805b49fa8ba26d83ef8a4b611188c848484b538a283
#!/usr/bin/env python
import argparse
import atexit
import copy
import os
import shutil
import socket
import subprocess
import sys
import tempfile
import warnings

try:
    import django
except ImportError as e:
    raise RuntimeError(
        'Django module not found, reference tests/README.rst for instructions.'
    ) from e
else:
    from django.apps import apps
    from django.conf import settings
    from django.db import connection, connections
    from django.test import TestCase, TransactionTestCase
    from django.test.runner import default_test_processes
    from django.test.selenium import SeleniumTestCaseBase
    from django.test.utils import get_runner
    from django.utils.deprecation import RemovedInDjango40Warning
    from django.utils.log import DEFAULT_LOGGING
    from django.utils.version import PY37

try:
    import MySQLdb
except ImportError:
    pass
else:
    # Ignore informational warnings from QuerySet.explain().
    warnings.filterwarnings('ignore', r'\(1003, *', category=MySQLdb.Warning)

# Make deprecation warnings errors to ensure no usage of deprecated features.
warnings.simplefilter("error", RemovedInDjango40Warning)
# Make resource and runtime warning errors to ensure no usage of error prone
# patterns.
warnings.simplefilter("error", ResourceWarning)
warnings.simplefilter("error", RuntimeWarning)
# Ignore known warnings in test dependencies.
warnings.filterwarnings("ignore", "'U' mode is deprecated", DeprecationWarning, module='docutils.io')

RUNTESTS_DIR = os.path.abspath(os.path.dirname(__file__))

TEMPLATE_DIR = os.path.join(RUNTESTS_DIR, 'templates')

# Create a specific subdirectory for the duration of the test suite.
TMPDIR = tempfile.mkdtemp(prefix='django_')
# Set the TMPDIR environment variable in addition to tempfile.tempdir
# so that children processes inherit it.
tempfile.tempdir = os.environ['TMPDIR'] = TMPDIR

# Remove the temporary TMPDIR at interpreter exit.
atexit.register(shutil.rmtree, TMPDIR)


SUBDIRS_TO_SKIP = [
    'data',
    'import_error_package',
    'test_runner_apps',
]

ALWAYS_INSTALLED_APPS = [
    'django.contrib.contenttypes',
    'django.contrib.auth',
    'django.contrib.sites',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.admin.apps.SimpleAdminConfig',
    'django.contrib.staticfiles',
]

ALWAYS_MIDDLEWARE = [
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
]

# Need to add the associated contrib app to INSTALLED_APPS in some cases to
# avoid "RuntimeError: Model class X doesn't declare an explicit app_label
# and isn't in an application in INSTALLED_APPS."
CONTRIB_TESTS_TO_APPS = {
    'flatpages_tests': 'django.contrib.flatpages',
    'redirects_tests': 'django.contrib.redirects',
}


def get_test_modules():
    modules = []
    discovery_paths = [(None, RUNTESTS_DIR)]
    if connection.features.gis_enabled:
        # GIS tests are in nested apps
        discovery_paths.append(('gis_tests', os.path.join(RUNTESTS_DIR, 'gis_tests')))
    else:
        SUBDIRS_TO_SKIP.append('gis_tests')

    for modpath, dirpath in discovery_paths:
        for f in os.scandir(dirpath):
            if ('.' not in f.name and
                    os.path.basename(f.name) not in SUBDIRS_TO_SKIP and
                    not f.is_file() and
                    os.path.exists(os.path.join(f.path, '__init__.py'))):
                modules.append((modpath, f.name))
    return modules


def get_installed():
    return [app_config.name for app_config in apps.get_app_configs()]


def setup(verbosity, test_labels, parallel, start_at, start_after):
    # Reduce the given test labels to just the app module path.
    test_labels_set = set()
    for label in test_labels:
        bits = label.split('.')[:1]
        test_labels_set.add('.'.join(bits))

    if verbosity >= 1:
        msg = "Testing against Django installed in '%s'" % os.path.dirname(django.__file__)
        max_parallel = default_test_processes() if parallel == 0 else parallel
        if max_parallel > 1:
            msg += " with up to %d processes" % max_parallel
        print(msg)

    # Force declaring available_apps in TransactionTestCase for faster tests.
    def no_available_apps(self):
        raise Exception("Please define available_apps in TransactionTestCase "
                        "and its subclasses.")
    TransactionTestCase.available_apps = property(no_available_apps)
    TestCase.available_apps = None

    state = {
        'INSTALLED_APPS': settings.INSTALLED_APPS,
        'ROOT_URLCONF': getattr(settings, "ROOT_URLCONF", ""),
        'TEMPLATES': settings.TEMPLATES,
        'LANGUAGE_CODE': settings.LANGUAGE_CODE,
        'STATIC_URL': settings.STATIC_URL,
        'STATIC_ROOT': settings.STATIC_ROOT,
        'MIDDLEWARE': settings.MIDDLEWARE,
    }

    # Redirect some settings for the duration of these tests.
    settings.INSTALLED_APPS = ALWAYS_INSTALLED_APPS
    settings.ROOT_URLCONF = 'urls'
    settings.STATIC_URL = '/static/'
    settings.STATIC_ROOT = os.path.join(TMPDIR, 'static')
    settings.TEMPLATES = [{
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [TEMPLATE_DIR],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    }]
    settings.LANGUAGE_CODE = 'en'
    settings.SITE_ID = 1
    settings.MIDDLEWARE = ALWAYS_MIDDLEWARE
    settings.MIGRATION_MODULES = {
        # This lets us skip creating migrations for the test models as many of
        # them depend on one of the following contrib applications.
        'auth': None,
        'contenttypes': None,
        'sessions': None,
    }
    log_config = copy.deepcopy(DEFAULT_LOGGING)
    # Filter out non-error logging so we don't have to capture it in lots of
    # tests.
    log_config['loggers']['django']['level'] = 'ERROR'
    settings.LOGGING = log_config
    settings.SILENCED_SYSTEM_CHECKS = [
        'fields.W342',  # ForeignKey(unique=True) -> OneToOneField
        'fields.W903',  # NullBooleanField deprecated.
    ]

    # Load all the ALWAYS_INSTALLED_APPS.
    django.setup()

    # It would be nice to put this validation earlier but it must come after
    # django.setup() so that connection.features.gis_enabled can be accessed
    # without raising AppRegistryNotReady when running gis_tests in isolation
    # on some backends (e.g. PostGIS).
    if 'gis_tests' in test_labels_set and not connection.features.gis_enabled:
        print('Aborting: A GIS database backend is required to run gis_tests.')
        sys.exit(1)

    def _module_match_label(module_label, label):
        # Exact or ancestor match.
        return module_label == label or module_label.startswith(label + '.')

    # Load all the test model apps.
    test_modules = get_test_modules()

    found_start = not (start_at or start_after)
    installed_app_names = set(get_installed())
    for modpath, module_name in test_modules:
        if modpath:
            module_label = modpath + '.' + module_name
        else:
            module_label = module_name
        if not found_start:
            if start_at and _module_match_label(module_label, start_at):
                found_start = True
            elif start_after and _module_match_label(module_label, start_after):
                found_start = True
                continue
            else:
                continue
        # If the module (or an ancestor) was named on the command line, or
        # no modules were named (i.e., run all), import this module and add
        # it to INSTALLED_APPS.
        module_found_in_labels = not test_labels or any(
            _module_match_label(module_label, label) for label in test_labels_set
        )

        if module_name in CONTRIB_TESTS_TO_APPS and module_found_in_labels:
            settings.INSTALLED_APPS.append(CONTRIB_TESTS_TO_APPS[module_name])

        if module_found_in_labels and module_label not in installed_app_names:
            if verbosity >= 2:
                print("Importing application %s" % module_name)
            settings.INSTALLED_APPS.append(module_label)

    # Add contrib.gis to INSTALLED_APPS if needed (rather than requiring
    # @override_settings(INSTALLED_APPS=...) on all test cases).
    gis = 'django.contrib.gis'
    if connection.features.gis_enabled and gis not in settings.INSTALLED_APPS:
        if verbosity >= 2:
            print("Importing application %s" % gis)
        settings.INSTALLED_APPS.append(gis)

    apps.set_installed_apps(settings.INSTALLED_APPS)

    return state


def teardown(state):
    # Restore the old settings.
    for key, value in state.items():
        setattr(settings, key, value)
    # Discard the multiprocessing.util finalizer that tries to remove a
    # temporary directory that's already removed by this script's
    # atexit.register(shutil.rmtree, TMPDIR) handler. Prevents
    # FileNotFoundError at the end of a test run (#27890).
    from multiprocessing.util import _finalizer_registry
    _finalizer_registry.pop((-100, 0), None)


def actual_test_processes(parallel):
    if parallel == 0:
        # This doesn't work before django.setup() on some databases.
        if all(conn.features.can_clone_databases for conn in connections.all()):
            return default_test_processes()
        else:
            return 1
    else:
        return parallel


class ActionSelenium(argparse.Action):
    """
    Validate the comma-separated list of requested browsers.
    """
    def __call__(self, parser, namespace, values, option_string=None):
        browsers = values.split(',')
        for browser in browsers:
            try:
                SeleniumTestCaseBase.import_webdriver(browser)
            except ImportError:
                raise argparse.ArgumentError(self, "Selenium browser specification '%s' is not valid." % browser)
        setattr(namespace, self.dest, browsers)


def django_tests(verbosity, interactive, failfast, keepdb, reverse,
                 test_labels, debug_sql, parallel, tags, exclude_tags,
                 test_name_patterns, start_at, start_after, pdb, buffer):
    state = setup(verbosity, test_labels, parallel, start_at, start_after)
    extra_tests = []
    # Run the test suite, including the extra validation tests.
    if not hasattr(settings, 'TEST_RUNNER'):
        settings.TEST_RUNNER = 'django.test.runner.DiscoverRunner'
    TestRunner = get_runner(settings)
    test_runner = TestRunner(
        verbosity=verbosity,
        interactive=interactive,
        failfast=failfast,
        keepdb=keepdb,
        reverse=reverse,
        debug_sql=debug_sql,
        parallel=actual_test_processes(parallel),
        tags=tags,
        exclude_tags=exclude_tags,
        test_name_patterns=test_name_patterns,
        pdb=pdb,
        buffer=buffer,
    )
    failures = test_runner.run_tests(
        test_labels or get_installed(),
        extra_tests=extra_tests,
    )
    teardown(state)
    return failures


def get_subprocess_args(options):
    subprocess_args = [
        sys.executable, __file__, '--settings=%s' % options.settings
    ]
    if options.failfast:
        subprocess_args.append('--failfast')
    if options.verbosity:
        subprocess_args.append('--verbosity=%s' % options.verbosity)
    if not options.interactive:
        subprocess_args.append('--noinput')
    if options.tags:
        subprocess_args.append('--tag=%s' % options.tags)
    if options.exclude_tags:
        subprocess_args.append('--exclude_tag=%s' % options.exclude_tags)
    return subprocess_args


def bisect_tests(bisection_label, options, test_labels, parallel, start_at, start_after):
    state = setup(options.verbosity, test_labels, parallel, start_at, start_after)

    test_labels = test_labels or get_installed()

    print('***** Bisecting test suite: %s' % ' '.join(test_labels))

    # Make sure the bisection point isn't in the test list
    # Also remove tests that need to be run in specific combinations
    for label in [bisection_label, 'model_inheritance_same_model_name']:
        try:
            test_labels.remove(label)
        except ValueError:
            pass

    subprocess_args = get_subprocess_args(options)

    iteration = 1
    while len(test_labels) > 1:
        midpoint = len(test_labels) // 2
        test_labels_a = test_labels[:midpoint] + [bisection_label]
        test_labels_b = test_labels[midpoint:] + [bisection_label]
        print('***** Pass %da: Running the first half of the test suite' % iteration)
        print('***** Test labels: %s' % ' '.join(test_labels_a))
        failures_a = subprocess.run(subprocess_args + test_labels_a)

        print('***** Pass %db: Running the second half of the test suite' % iteration)
        print('***** Test labels: %s' % ' '.join(test_labels_b))
        print('')
        failures_b = subprocess.run(subprocess_args + test_labels_b)

        if failures_a.returncode and not failures_b.returncode:
            print("***** Problem found in first half. Bisecting again...")
            iteration += 1
            test_labels = test_labels_a[:-1]
        elif failures_b.returncode and not failures_a.returncode:
            print("***** Problem found in second half. Bisecting again...")
            iteration += 1
            test_labels = test_labels_b[:-1]
        elif failures_a.returncode and failures_b.returncode:
            print("***** Multiple sources of failure found")
            break
        else:
            print("***** No source of failure found... try pair execution (--pair)")
            break

    if len(test_labels) == 1:
        print("***** Source of error: %s" % test_labels[0])
    teardown(state)


def paired_tests(paired_test, options, test_labels, parallel, start_at, start_after):
    state = setup(options.verbosity, test_labels, parallel, start_at, start_after)

    test_labels = test_labels or get_installed()

    print('***** Trying paired execution')

    # Make sure the constant member of the pair isn't in the test list
    # Also remove tests that need to be run in specific combinations
    for label in [paired_test, 'model_inheritance_same_model_name']:
        try:
            test_labels.remove(label)
        except ValueError:
            pass

    subprocess_args = get_subprocess_args(options)

    for i, label in enumerate(test_labels):
        print('***** %d of %d: Check test pairing with %s' % (
            i + 1, len(test_labels), label))
        failures = subprocess.call(subprocess_args + [label, paired_test])
        if failures:
            print('***** Found problem pair with %s' % label)
            return

    print('***** No problem pair found')
    teardown(state)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Run the Django test suite.")
    parser.add_argument(
        'modules', nargs='*', metavar='module',
        help='Optional path(s) to test modules; e.g. "i18n" or '
             '"i18n.tests.TranslationTests.test_lazy_objects".',
    )
    parser.add_argument(
        '-v', '--verbosity', default=1, type=int, choices=[0, 1, 2, 3],
        help='Verbosity level; 0=minimal output, 1=normal output, '
             '2=verbose output, 3=very verbose output',
    )
    parser.add_argument(
        '--noinput', action='store_false', dest='interactive',
        help='Tells Django to NOT prompt the user for input of any kind.',
    )
    parser.add_argument(
        '--failfast', action='store_true',
        help='Tells Django to stop running the test suite after first failed test.',
    )
    parser.add_argument(
        '--keepdb', action='store_true',
        help='Tells Django to preserve the test database between runs.',
    )
    parser.add_argument(
        '--settings',
        help='Python path to settings module, e.g. "myproject.settings". If '
             'this isn\'t provided, either the DJANGO_SETTINGS_MODULE '
             'environment variable or "test_sqlite" will be used.',
    )
    parser.add_argument(
        '--bisect',
        help='Bisect the test suite to discover a test that causes a test '
             'failure when combined with the named test.',
    )
    parser.add_argument(
        '--pair',
        help='Run the test suite in pairs with the named test to find problem pairs.',
    )
    parser.add_argument(
        '--reverse', action='store_true',
        help='Sort test suites and test cases in opposite order to debug '
             'test side effects not apparent with normal execution lineup.',
    )
    parser.add_argument(
        '--selenium', action=ActionSelenium, metavar='BROWSERS',
        help='A comma-separated list of browsers to run the Selenium tests against.',
    )
    parser.add_argument(
        '--headless', action='store_true',
        help='Run selenium tests in headless mode, if the browser supports the option.',
    )
    parser.add_argument(
        '--selenium-hub',
        help='A URL for a selenium hub instance to use in combination with --selenium.',
    )
    parser.add_argument(
        '--external-host', default=socket.gethostname(),
        help='The external host that can be reached by the selenium hub instance when running Selenium '
             'tests via Selenium Hub.',
    )
    parser.add_argument(
        '--debug-sql', action='store_true',
        help='Turn on the SQL query logger within tests.',
    )
    parser.add_argument(
        '--parallel', nargs='?', default=0, type=int,
        const=default_test_processes(), metavar='N',
        help='Run tests using up to N parallel processes.',
    )
    parser.add_argument(
        '--tag', dest='tags', action='append',
        help='Run only tests with the specified tags. Can be used multiple times.',
    )
    parser.add_argument(
        '--exclude-tag', dest='exclude_tags', action='append',
        help='Do not run tests with the specified tag. Can be used multiple times.',
    )
    parser.add_argument(
        '--start-after', dest='start_after',
        help='Run tests starting after the specified top-level module.',
    )
    parser.add_argument(
        '--start-at', dest='start_at',
        help='Run tests starting at the specified top-level module.',
    )
    parser.add_argument(
        '--pdb', action='store_true',
        help='Runs the PDB debugger on error or failure.',
    )
    parser.add_argument(
        '-b', '--buffer', action='store_true',
        help='Discard output of passing tests.',
    )
    if PY37:
        parser.add_argument(
            '-k', dest='test_name_patterns', action='append',
            help=(
                'Only run test methods and classes matching test name pattern. '
                'Same as unittest -k option. Can be used multiple times.'
            ),
        )

    options = parser.parse_args()

    using_selenium_hub = options.selenium and options.selenium_hub
    if options.selenium_hub and not options.selenium:
        parser.error('--selenium-hub and --external-host require --selenium to be used.')
    if using_selenium_hub and not options.external_host:
        parser.error('--selenium-hub and --external-host must be used together.')

    # Allow including a trailing slash on app_labels for tab completion convenience
    options.modules = [os.path.normpath(labels) for labels in options.modules]

    mutually_exclusive_options = [options.start_at, options.start_after, options.modules]
    enabled_module_options = [bool(option) for option in mutually_exclusive_options].count(True)
    if enabled_module_options > 1:
        print('Aborting: --start-at, --start-after, and test labels are mutually exclusive.')
        sys.exit(1)
    for opt_name in ['start_at', 'start_after']:
        opt_val = getattr(options, opt_name)
        if opt_val:
            if '.' in opt_val:
                print('Aborting: --%s must be a top-level module.' % opt_name.replace('_', '-'))
                sys.exit(1)
            setattr(options, opt_name, os.path.normpath(opt_val))
    if options.settings:
        os.environ['DJANGO_SETTINGS_MODULE'] = options.settings
    else:
        os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'test_sqlite')
        options.settings = os.environ['DJANGO_SETTINGS_MODULE']

    if options.selenium:
        if not options.tags:
            options.tags = ['selenium']
        elif 'selenium' not in options.tags:
            options.tags.append('selenium')
        if options.selenium_hub:
            SeleniumTestCaseBase.selenium_hub = options.selenium_hub
            SeleniumTestCaseBase.external_host = options.external_host
        SeleniumTestCaseBase.headless = options.headless
        SeleniumTestCaseBase.browsers = options.selenium

    if options.bisect:
        bisect_tests(
            options.bisect, options, options.modules, options.parallel,
            options.start_at, options.start_after,
        )
    elif options.pair:
        paired_tests(
            options.pair, options, options.modules, options.parallel,
            options.start_at, options.start_after,
        )
    else:
        failures = django_tests(
            options.verbosity, options.interactive, options.failfast,
            options.keepdb, options.reverse, options.modules,
            options.debug_sql, options.parallel, options.tags,
            options.exclude_tags,
            getattr(options, 'test_name_patterns', None),
            options.start_at, options.start_after, options.pdb, options.buffer,
        )
        if failures:
            sys.exit(1)
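
# Usage sketch for the script above (hypothetical invocations, not part of
# the script itself; assumes it's run from the tests/ directory of a Django
# checkout with the default test_sqlite settings available):
#
#   ./runtests.py                         # run the whole suite
#   ./runtests.py i18n -v 2               # run one test app, verbose output
#   ./runtests.py --parallel 4 basic      # run 'basic' across 4 processes
#   ./runtests.py --bisect basic          # find a test that breaks 'basic'
#                                         # when run in combination with it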
635f11cc57e73a2c55a855f057cc1d21c539c917f8f21e490f2260f1c8682c07
# Django documentation build configuration file, created by
# sphinx-quickstart on Thu Mar 27 09:06:53 2008.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't picklable (module imports are okay, they're removed automatically).
#
# All configuration values have a default; values that are commented out
# serve to show the default.

import sys
from os.path import abspath, dirname, join

# Workaround for sphinx-build recursion limit overflow:
# pickle.dump(doctree, f, pickle.HIGHEST_PROTOCOL)
#  RuntimeError: maximum recursion depth exceeded while pickling an object
#
# Python's default allowed recursion depth is 1000 but this isn't enough for
# building docs/ref/settings.txt sometimes.
# https://groups.google.com/d/topic/sphinx-dev/MtRf64eGtv4/discussion
sys.setrecursionlimit(2000)

# Make sure we get the version of this copy of Django
sys.path.insert(1, dirname(dirname(abspath(__file__))))

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(abspath(join(dirname(__file__), "_ext")))

# -- General configuration -----------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.6.0'

# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
    "djangodocs",
    'sphinx.ext.extlinks',
    "sphinx.ext.intersphinx",
    "sphinx.ext.viewcode",
    "sphinx.ext.autosectionlabel",
]

# AutosectionLabel settings.
# Uses a <page>:<label> schema which doesn't work for duplicate sub-section
# labels, so set max depth.
autosectionlabel_prefix_document = True
autosectionlabel_maxdepth = 2

# Spelling check needs an additional module that is not installed by default.
# Add it only if spelling check is requested so docs can be generated without it.
if 'spelling' in sys.argv:
    extensions.append("sphinxcontrib.spelling")

# Spelling language.
spelling_lang = 'en_US'

# Location of word list.
spelling_word_list_filename = 'spelling_wordlist'

# Add any paths that contain templates here, relative to this directory.
# templates_path = []

# The suffix of source filenames.
source_suffix = '.txt'

# The encoding of source files.
# source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'contents'

# General substitutions.
project = 'Django'
copyright = 'Django Software Foundation and contributors'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '3.1'
# The full version, including alpha/beta/rc tags.
try:
    from django import VERSION, get_version
except ImportError:
    release = version
else:
    def django_release():
        pep440ver = get_version()
        if VERSION[3:5] == ('alpha', 0) and 'dev' not in pep440ver:
            return pep440ver + '.dev'
        return pep440ver

    release = django_release()

# The "development version" of Django
django_next_version = '3.1'

extlinks = {
    'commit': ('https://github.com/django/django/commit/%s', ''),
    'cve': ('https://nvd.nist.gov/view/vuln/detail?vulnId=%s', 'CVE-'),
    # A file or directory. GitHub redirects from blob to tree if needed.
    'source': ('https://github.com/django/django/blob/master/%s', ''),
    'ticket': ('https://code.djangoproject.com/ticket/%s', '#'),
}

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None

# Location for .po/.mo translation files used when language is set
locale_dirs = ['locale/']

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', '_theme']

# The reST default role (used for this markup: `text`) to use for all documents.
default_role = "default-role-error"

# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = False

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'trac'

# Links to Python's docs should reference the most recent version of the 3.x
# branch, which is located at this URL.
intersphinx_mapping = {
    'python': ('https://docs.python.org/3/', None),
    'sphinx': ('https://www.sphinx-doc.org/en/master/', None),
    'psycopg2': ('https://www.psycopg.org/docs/', None),
}

# Python's docs don't change every week.
intersphinx_cache_limit = 90  # days

# The 'versionadded' and 'versionchanged' directives are overridden.
suppress_warnings = ['app.add_directive']

# -- Options for HTML output ---------------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
html_theme = "djangodocs"

# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ["_theme"]

# The name for this set of Sphinx documents.  If None, it defaults to
# "<project> v<release> documentation".
# html_title = None

# A shorter title for the navigation bar.  Default is the same as html_title.
# html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ["_static"]

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'

# Content template for the index page.
# html_index = ''

# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
html_additional_pages = {}

# If false, no module index is generated.
# html_domain_indices = True

# If false, no index is generated.
# html_use_index = True

# If true, the index is split into individual pages for each letter.
# html_split_index = False

# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it.  The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None

# Output file base name for HTML help builder.
htmlhelp_basename = 'Djangodoc'

modindex_common_prefix = ["django."]

# Appended to every page
rst_epilog = """
.. |django-users| replace:: :ref:`django-users <django-users-mailing-list>`
.. |django-core-mentorship| replace:: :ref:`django-core-mentorship <django-core-mentorship-mailing-list>`
.. |django-developers| replace:: :ref:`django-developers <django-developers-mailing-list>`
.. |django-announce| replace:: :ref:`django-announce <django-announce-mailing-list>`
.. |django-updates| replace:: :ref:`django-updates <django-updates-mailing-list>`
"""

# -- Options for LaTeX output --------------------------------------------------

latex_elements = {
    'preamble': (
        '\\DeclareUnicodeCharacter{2264}{\\ensuremath{\\le}}'
        '\\DeclareUnicodeCharacter{2265}{\\ensuremath{\\ge}}'
        '\\DeclareUnicodeCharacter{2665}{[unicode-heart]}'
        '\\DeclareUnicodeCharacter{2713}{[unicode-checkmark]}'
    ),
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
# latex_documents = []
latex_documents = [
    ('contents', 'django.tex', 'Django Documentation',
     'Django Software Foundation', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False

# If true, show page references after internal links.
# latex_show_pagerefs = False

# If true, show URL addresses after external links.
# latex_show_urls = False

# Documents to append as an appendix to all manuals.
# latex_appendices = []

# If false, no module index is generated.
# latex_domain_indices = True


# -- Options for manual page output --------------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(
    'ref/django-admin',
    'django-admin',
    'Utility script for the Django Web framework',
    ['Django Software Foundation'],
    1
)]


# -- Options for Texinfo output ------------------------------------------------

# List of tuples (startdocname, targetname, title, author, dir_entry,
# description, category, toctree_only)
texinfo_documents = [(
    master_doc, "django", "", "", "Django",
    "Documentation of the Django framework", "Web development", False
)]


# -- Options for Epub output ---------------------------------------------------

# Bibliographic Dublin Core info.
epub_title = project
epub_author = 'Django Software Foundation'
epub_publisher = 'Django Software Foundation'
epub_copyright = copyright

# The basename for the epub file. It defaults to the project name.
# epub_basename = 'Django'

# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
epub_theme = 'djangodocs-epub'

# The language of the text. It defaults to the language option
# or en if the language is not set.
# epub_language = ''

# The scheme of the identifier. Typical schemes are ISBN or URL.
# epub_scheme = ''

# The unique identifier of the text. This can be an ISBN number
# or the project homepage.
# epub_identifier = ''

# A unique identification for the text.
# epub_uid = ''

# A tuple containing the cover image and cover page html template filenames.
epub_cover = ('', 'epub-cover.html')

# A sequence of (type, uri, title) tuples for the guide element of content.opf.
# epub_guide = ()

# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
# epub_pre_files = []

# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
# epub_post_files = []

# A list of files that should not be packed into the epub file.
# epub_exclude_files = []

# The depth of the table of contents in toc.ncx.
# epub_tocdepth = 3

# Allow duplicate toc entries.
# epub_tocdup = True

# Choose between 'default' and 'includehidden'.
# epub_tocscope = 'default'

# Fix unsupported image types using the PIL.
# epub_fix_images = False

# Scale large images.
# epub_max_image_width = 0

# How to display URL addresses: 'footnote', 'no', or 'inline'.
# epub_show_urls = 'inline'

# If false, no index is generated.
# epub_use_index = True
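
# A short sketch of how this configuration is typically exercised (assumed
# commands; requires a Sphinx version satisfying needs_sphinx above, run from
# the docs/ directory):
#
#   make html        # render the docs into _build/html using this conf.py
#   make spelling    # also loads the optional sphinxcontrib.spelling extension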
4a20f1f2421990f18afa01d9f9cb529a5df9566820dc4ef68fbe0e23cc9e496d
import threading
import warnings
import weakref

from django.utils.deprecation import RemovedInDjango40Warning
from django.utils.inspect import func_accepts_kwargs


def _make_id(target):
    if hasattr(target, '__func__'):
        return (id(target.__self__), id(target.__func__))
    return id(target)


NONE_ID = _make_id(None)

# A marker for caching
NO_RECEIVERS = object()


class Signal:
    """
    Base class for all signals.

    Internal attributes:

        receivers
            { receiverkey (id) : weakref(receiver) }
    """
    def __init__(self, providing_args=None, use_caching=False):
        """
        Create a new signal.
        """
        self.receivers = []
        if providing_args is not None:
            warnings.warn(
                'The providing_args argument is deprecated. As it is purely '
                'documentational, it has no replacement. If you rely on this '
                'argument as documentation, you can move the text to a code '
                'comment or docstring.',
                RemovedInDjango40Warning, stacklevel=2,
            )
        self.lock = threading.Lock()
        self.use_caching = use_caching
        # For convenience we create empty caches even if they are not used.
        # A note about caching: if use_caching is defined, then for each
        # distinct sender we cache the receivers that sender has in
        # 'sender_receivers_cache'. The cache is cleaned when .connect() or
        # .disconnect() is called and populated on send().
        self.sender_receivers_cache = weakref.WeakKeyDictionary() if use_caching else {}
        self._dead_receivers = False

    def connect(self, receiver, sender=None, weak=True, dispatch_uid=None):
        """
        Connect receiver to sender for signal.

        Arguments:

            receiver
                A function or an instance method which is to receive signals.
                Receivers must be hashable objects.

                If weak is True, then receiver must be weak referenceable.

                Receivers must be able to accept keyword arguments.

                If a receiver is connected with a dispatch_uid argument, it
                will not be added if another receiver was already connected
                with that dispatch_uid.

            sender
                The sender to which the receiver should respond. Must either
                be a Python object, or None to receive events from any sender.

            weak
                Whether to use weak references to the receiver. By default,
                the module will attempt to use weak references to the
                receiver objects. If this parameter is false, then strong
                references will be used.

            dispatch_uid
                An identifier used to uniquely identify a particular instance
                of a receiver. This will usually be a string, though it may be
                anything hashable.
        """
        from django.conf import settings

        # If DEBUG is on, check that we got a good receiver
        if settings.configured and settings.DEBUG:
            assert callable(receiver), "Signal receivers must be callable."
            # Check for **kwargs
            if not func_accepts_kwargs(receiver):
                raise ValueError("Signal receivers must accept keyword arguments (**kwargs).")

        if dispatch_uid:
            lookup_key = (dispatch_uid, _make_id(sender))
        else:
            lookup_key = (_make_id(receiver), _make_id(sender))

        if weak:
            ref = weakref.ref
            receiver_object = receiver
            # Check for bound methods
            if hasattr(receiver, '__self__') and hasattr(receiver, '__func__'):
                ref = weakref.WeakMethod
                receiver_object = receiver.__self__
            receiver = ref(receiver)
            weakref.finalize(receiver_object, self._remove_receiver)

        with self.lock:
            self._clear_dead_receivers()
            if not any(r_key == lookup_key for r_key, _ in self.receivers):
                self.receivers.append((lookup_key, receiver))
            self.sender_receivers_cache.clear()

    def disconnect(self, receiver=None, sender=None, dispatch_uid=None):
        """
        Disconnect receiver from sender for signal.

        If weak references are used, disconnect need not be called. The
        receiver will be removed from dispatch automatically.

        Arguments:

            receiver
                The registered receiver to disconnect. May be None if
                dispatch_uid is specified.

            sender
                The registered sender to disconnect.

            dispatch_uid
                The unique identifier of the receiver to disconnect.
        """
        if dispatch_uid:
            lookup_key = (dispatch_uid, _make_id(sender))
        else:
            lookup_key = (_make_id(receiver), _make_id(sender))

        disconnected = False
        with self.lock:
            self._clear_dead_receivers()
            for index in range(len(self.receivers)):
                (r_key, _) = self.receivers[index]
                if r_key == lookup_key:
                    disconnected = True
                    del self.receivers[index]
                    break
            self.sender_receivers_cache.clear()
        return disconnected

    def has_listeners(self, sender=None):
        return bool(self._live_receivers(sender))

    def send(self, sender, **named):
        """
        Send signal from sender to all connected receivers.

        If any receiver raises an error, the error propagates back through
        send, terminating the dispatch loop. So it's possible that all
        receivers won't be called if an error is raised.

        Arguments:

            sender
                The sender of the signal. Either a specific object or None.

            named
                Named arguments which will be passed to receivers.

        Return a list of tuple pairs [(receiver, response), ... ].
        """
        if not self.receivers or self.sender_receivers_cache.get(sender) is NO_RECEIVERS:
            return []

        return [
            (receiver, receiver(signal=self, sender=sender, **named))
            for receiver in self._live_receivers(sender)
        ]

    def send_robust(self, sender, **named):
        """
        Send signal from sender to all connected receivers catching errors.

        Arguments:

            sender
                The sender of the signal. Can be any Python object (normally
                one registered with a connect if you actually want something
                to occur).

            named
                Named arguments which will be passed to receivers.

        Return a list of tuple pairs [(receiver, response), ... ].

        If any receiver raises an error (specifically any subclass of
        Exception), return the error instance as the result for that receiver.
        """
        if not self.receivers or self.sender_receivers_cache.get(sender) is NO_RECEIVERS:
            return []

        # Call each receiver with whatever arguments it can accept.
        # Return a list of tuple pairs [(receiver, response), ... ].
        responses = []
        for receiver in self._live_receivers(sender):
            try:
                response = receiver(signal=self, sender=sender, **named)
            except Exception as err:
                responses.append((receiver, err))
            else:
                responses.append((receiver, response))
        return responses

    def _clear_dead_receivers(self):
        # Note: caller is assumed to hold self.lock.
        if self._dead_receivers:
            self._dead_receivers = False
            self.receivers = [
                r for r in self.receivers
                if not (isinstance(r[1], weakref.ReferenceType) and r[1]() is None)
            ]

    def _live_receivers(self, sender):
        """
        Filter sequence of receivers to get resolved, live receivers.

        This checks for weak references and resolves them, returning only
        live receivers.
        """
        receivers = None
        if self.use_caching and not self._dead_receivers:
            receivers = self.sender_receivers_cache.get(sender)
            # We could end up here with NO_RECEIVERS even if we do check this
            # case in .send() prior to calling _live_receivers() due to
            # concurrent .send() call.
            if receivers is NO_RECEIVERS:
                return []
        if receivers is None:
            with self.lock:
                self._clear_dead_receivers()
                senderkey = _make_id(sender)
                receivers = []
                for (receiverkey, r_senderkey), receiver in self.receivers:
                    if r_senderkey == NONE_ID or r_senderkey == senderkey:
                        receivers.append(receiver)
                if self.use_caching:
                    if not receivers:
                        self.sender_receivers_cache[sender] = NO_RECEIVERS
                    else:
                        # Note, we must cache the weakref versions.
                        self.sender_receivers_cache[sender] = receivers
        non_weak_receivers = []
        for receiver in receivers:
            if isinstance(receiver, weakref.ReferenceType):
                # Dereference the weak reference.
                receiver = receiver()
                if receiver is not None:
                    non_weak_receivers.append(receiver)
            else:
                non_weak_receivers.append(receiver)
        return non_weak_receivers

    def _remove_receiver(self, receiver=None):
        # Mark that the self.receivers list has dead weakrefs. If so, we will
        # clean those up in connect, disconnect and _live_receivers while
        # holding self.lock. Note that doing the cleanup here isn't a good
        # idea, _remove_receiver() will be called as side effect of garbage
        # collection, and so the call can happen while we are already holding
        # self.lock.
        self._dead_receivers = True


def receiver(signal, **kwargs):
    """
    A decorator for connecting receivers to signals. Used by passing in the
    signal (or list of signals) and keyword arguments to connect::

        @receiver(post_save, sender=MyModel)
        def signal_receiver(sender, **kwargs):
            ...

        @receiver([post_save, post_delete], sender=MyModel)
        def signals_receiver(sender, **kwargs):
            ...
    """
    def _decorator(func):
        if isinstance(signal, (list, tuple)):
            for s in signal:
                s.connect(func, **kwargs)
        else:
            signal.connect(func, **kwargs)
        return func
    return _decorator
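
# Minimal usage sketch for the Signal API above (hypothetical signal and
# receiver names; illustrative only):
#
#   pizza_done = Signal()
#
#   @receiver(pizza_done)
#   def callback(sender, **kwargs):
#       return 'done'
#
#   # send() propagates receiver exceptions and aborts dispatch; send_robust()
#   # catches them and returns the exception instance as that receiver's
#   # response, so the remaining receivers still run.
#   responses = pizza_done.send(sender=None, size='large')
#   robust_responses = pizza_done.send_robust(sender=None, size='large')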
42d2984f12b4cfaa90424d4ce2afd04358f3b4971eec67ecfdb3dd3561e197af
"""Django Unit Test framework.""" from django.test.client import ( AsyncClient, AsyncRequestFactory, Client, RequestFactory, ) from django.test.testcases import ( LiveServerTestCase, SimpleTestCase, TestCase, TransactionTestCase, skipIfDBFeature, skipUnlessAnyDBFeature, skipUnlessDBFeature, ) from django.test.utils import ( ignore_warnings, modify_settings, override_settings, override_system_checks, tag, ) __all__ = [ 'AsyncClient', 'AsyncRequestFactory', 'Client', 'RequestFactory', 'TestCase', 'TransactionTestCase', 'SimpleTestCase', 'LiveServerTestCase', 'skipIfDBFeature', 'skipUnlessAnyDBFeature', 'skipUnlessDBFeature', 'ignore_warnings', 'modify_settings', 'override_settings', 'override_system_checks', 'tag', ]
37df7d23933102a0afe27d0670c58d2016ab6fa34d8c3eb2b278294ef58a74fb
import ctypes import itertools import logging import multiprocessing import os import pickle import textwrap import unittest from importlib import import_module from io import StringIO from django.core.management import call_command from django.db import connections from django.test import SimpleTestCase, TestCase from django.test.utils import ( setup_databases as _setup_databases, setup_test_environment, teardown_databases as _teardown_databases, teardown_test_environment, ) from django.utils.datastructures import OrderedSet from django.utils.version import PY37 try: import ipdb as pdb except ImportError: import pdb try: import tblib.pickling_support except ImportError: tblib = None class DebugSQLTextTestResult(unittest.TextTestResult): def __init__(self, stream, descriptions, verbosity): self.logger = logging.getLogger('django.db.backends') self.logger.setLevel(logging.DEBUG) super().__init__(stream, descriptions, verbosity) def startTest(self, test): self.debug_sql_stream = StringIO() self.handler = logging.StreamHandler(self.debug_sql_stream) self.logger.addHandler(self.handler) super().startTest(test) def stopTest(self, test): super().stopTest(test) self.logger.removeHandler(self.handler) if self.showAll: self.debug_sql_stream.seek(0) self.stream.write(self.debug_sql_stream.read()) self.stream.writeln(self.separator2) def addError(self, test, err): super().addError(test, err) self.debug_sql_stream.seek(0) self.errors[-1] = self.errors[-1] + (self.debug_sql_stream.read(),) def addFailure(self, test, err): super().addFailure(test, err) self.debug_sql_stream.seek(0) self.failures[-1] = self.failures[-1] + (self.debug_sql_stream.read(),) def addSubTest(self, test, subtest, err): super().addSubTest(test, subtest, err) if err is not None: self.debug_sql_stream.seek(0) errors = self.failures if issubclass(err[0], test.failureException) else self.errors errors[-1] = errors[-1] + (self.debug_sql_stream.read(),) def printErrorList(self, flavour, errors): for test, err, sql_debug in errors: self.stream.writeln(self.separator1) self.stream.writeln("%s: %s" % (flavour, self.getDescription(test))) self.stream.writeln(self.separator2) self.stream.writeln(err) self.stream.writeln(self.separator2) self.stream.writeln(sql_debug) class PDBDebugResult(unittest.TextTestResult): """ Custom result class that triggers a PDB session when an error or failure occurs. """ def addError(self, test, err): super().addError(test, err) self.debug(err) def addFailure(self, test, err): super().addFailure(test, err) self.debug(err) def debug(self, error): exc_type, exc_value, traceback = error print("\nOpening PDB: %r" % exc_value) pdb.post_mortem(traceback) class RemoteTestResult: """ Record information about which tests have succeeded and which have failed. The sole purpose of this class is to record events in the child processes so they can be replayed in the master process. As a consequence it doesn't inherit unittest.TestResult and doesn't attempt to implement all its API. The implementation matches the unpythonic coding style of unittest2. """ def __init__(self): if tblib is not None: tblib.pickling_support.install() self.events = [] self.failfast = False self.shouldStop = False self.testsRun = 0 @property def test_index(self): return self.testsRun - 1 def _confirm_picklable(self, obj): """ Confirm that obj can be pickled and unpickled as multiprocessing will need to pickle the exception in the child process and unpickle it in the parent process. Let the exception rise, if not. 
""" pickle.loads(pickle.dumps(obj)) def _print_unpicklable_subtest(self, test, subtest, pickle_exc): print(""" Subtest failed: test: {} subtest: {} Unfortunately, the subtest that failed cannot be pickled, so the parallel test runner cannot handle it cleanly. Here is the pickling error: > {} You should re-run this test with --parallel=1 to reproduce the failure with a cleaner failure message. """.format(test, subtest, pickle_exc)) def check_picklable(self, test, err): # Ensure that sys.exc_info() tuples are picklable. This displays a # clear multiprocessing.pool.RemoteTraceback generated in the child # process instead of a multiprocessing.pool.MaybeEncodingError, making # the root cause easier to figure out for users who aren't familiar # with the multiprocessing module. Since we're in a forked process, # our best chance to communicate with them is to print to stdout. try: self._confirm_picklable(err) except Exception as exc: original_exc_txt = repr(err[1]) original_exc_txt = textwrap.fill(original_exc_txt, 75, initial_indent=' ', subsequent_indent=' ') pickle_exc_txt = repr(exc) pickle_exc_txt = textwrap.fill(pickle_exc_txt, 75, initial_indent=' ', subsequent_indent=' ') if tblib is None: print(""" {} failed: {} Unfortunately, tracebacks cannot be pickled, making it impossible for the parallel test runner to handle this exception cleanly. In order to see the traceback, you should install tblib: python -m pip install tblib """.format(test, original_exc_txt)) else: print(""" {} failed: {} Unfortunately, the exception it raised cannot be pickled, making it impossible for the parallel test runner to handle it cleanly. Here's the error encountered while trying to pickle the exception: {} You should re-run this test with the --parallel=1 option to reproduce the failure and get a correct traceback. """.format(test, original_exc_txt, pickle_exc_txt)) raise def check_subtest_picklable(self, test, subtest): try: self._confirm_picklable(subtest) except Exception as exc: self._print_unpicklable_subtest(test, subtest, exc) raise def stop_if_failfast(self): if self.failfast: self.stop() def stop(self): self.shouldStop = True def startTestRun(self): self.events.append(('startTestRun',)) def stopTestRun(self): self.events.append(('stopTestRun',)) def startTest(self, test): self.testsRun += 1 self.events.append(('startTest', self.test_index)) def stopTest(self, test): self.events.append(('stopTest', self.test_index)) def addError(self, test, err): self.check_picklable(test, err) self.events.append(('addError', self.test_index, err)) self.stop_if_failfast() def addFailure(self, test, err): self.check_picklable(test, err) self.events.append(('addFailure', self.test_index, err)) self.stop_if_failfast() def addSubTest(self, test, subtest, err): # Follow Python 3.5's implementation of unittest.TestResult.addSubTest() # by not doing anything when a subtest is successful. if err is not None: # Call check_picklable() before check_subtest_picklable() since # check_picklable() performs the tblib check. self.check_picklable(test, err) self.check_subtest_picklable(test, subtest) self.events.append(('addSubTest', self.test_index, subtest, err)) self.stop_if_failfast() def addSuccess(self, test): self.events.append(('addSuccess', self.test_index)) def addSkip(self, test, reason): self.events.append(('addSkip', self.test_index, reason)) def addExpectedFailure(self, test, err): # If tblib isn't installed, pickling the traceback will always fail. 
# However we don't want tblib to be required for running the tests # when they pass or fail as expected. Drop the traceback when an # expected failure occurs. if tblib is None: err = err[0], err[1], None self.check_picklable(test, err) self.events.append(('addExpectedFailure', self.test_index, err)) def addUnexpectedSuccess(self, test): self.events.append(('addUnexpectedSuccess', self.test_index)) self.stop_if_failfast() class RemoteTestRunner: """ Run tests and record everything but don't display anything. The implementation matches the unpythonic coding style of unittest2. """ resultclass = RemoteTestResult def __init__(self, failfast=False, resultclass=None): self.failfast = failfast if resultclass is not None: self.resultclass = resultclass def run(self, test): result = self.resultclass() unittest.registerResult(result) result.failfast = self.failfast test(result) return result def default_test_processes(): """Default number of test processes when using the --parallel option.""" # The current implementation of the parallel test runner requires # multiprocessing to start subprocesses with fork(). if multiprocessing.get_start_method() != 'fork': return 1 try: return int(os.environ['DJANGO_TEST_PROCESSES']) except KeyError: return multiprocessing.cpu_count() _worker_id = 0 def _init_worker(counter): """ Switch to databases dedicated to this worker. This helper lives at module-level because of the multiprocessing module's requirements. """ global _worker_id with counter.get_lock(): counter.value += 1 _worker_id = counter.value for alias in connections: connection = connections[alias] settings_dict = connection.creation.get_test_db_clone_settings(str(_worker_id)) # connection.settings_dict must be updated in place for changes to be # reflected in django.db.connections. If the following line assigned # connection.settings_dict = settings_dict, new threads would connect # to the default database instead of the appropriate clone. connection.settings_dict.update(settings_dict) connection.close() def _run_subsuite(args): """ Run a suite of tests with a RemoteTestRunner and return a RemoteTestResult. This helper lives at module-level and its arguments are wrapped in a tuple because of the multiprocessing module's requirements. """ runner_class, subsuite_index, subsuite, failfast = args runner = runner_class(failfast=failfast) result = runner.run(subsuite) return subsuite_index, result.events class ParallelTestSuite(unittest.TestSuite): """ Run a series of tests in parallel in several processes. While the unittest module's documentation implies that orchestrating the execution of tests is the responsibility of the test runner, in practice, it appears that TestRunner classes are more concerned with formatting and displaying test results. Since there are fewer use cases for customizing TestSuite than TestRunner, implementing parallelization at the level of the TestSuite improves interoperability with existing custom test runners. A single instance of a test runner can still collect results from all tests without being aware that they have been run in parallel. """ # In case someone wants to modify these in a subclass. init_worker = _init_worker run_subsuite = _run_subsuite runner_class = RemoteTestRunner def __init__(self, suite, processes, failfast=False): self.subsuites = partition_suite_by_case(suite) self.processes = processes self.failfast = failfast super().__init__() def run(self, result): """ Distribute test cases across workers. 
Return an identifier of each test case with its result in order to use imap_unordered to show results as soon as they're available. To minimize pickling errors when getting results from workers: - pass back numeric indexes in self.subsuites instead of tests - make tracebacks picklable with tblib, if available Even with tblib, errors may still occur for dynamically created exception classes which cannot be unpickled. """ counter = multiprocessing.Value(ctypes.c_int, 0) pool = multiprocessing.Pool( processes=self.processes, initializer=self.init_worker.__func__, initargs=[counter], ) args = [ (self.runner_class, index, subsuite, self.failfast) for index, subsuite in enumerate(self.subsuites) ] test_results = pool.imap_unordered(self.run_subsuite.__func__, args) while True: if result.shouldStop: pool.terminate() break try: subsuite_index, events = test_results.next(timeout=0.1) except multiprocessing.TimeoutError: continue except StopIteration: pool.close() break tests = list(self.subsuites[subsuite_index]) for event in events: event_name = event[0] handler = getattr(result, event_name, None) if handler is None: continue test = tests[event[1]] args = event[2:] handler(test, *args) pool.join() return result def __iter__(self): return iter(self.subsuites) class DiscoverRunner: """A Django test runner that uses unittest2 test discovery.""" test_suite = unittest.TestSuite parallel_test_suite = ParallelTestSuite test_runner = unittest.TextTestRunner test_loader = unittest.defaultTestLoader reorder_by = (TestCase, SimpleTestCase) def __init__(self, pattern=None, top_level=None, verbosity=1, interactive=True, failfast=False, keepdb=False, reverse=False, debug_mode=False, debug_sql=False, parallel=0, tags=None, exclude_tags=None, test_name_patterns=None, pdb=False, buffer=False, **kwargs): self.pattern = pattern self.top_level = top_level self.verbosity = verbosity self.interactive = interactive self.failfast = failfast self.keepdb = keepdb self.reverse = reverse self.debug_mode = debug_mode self.debug_sql = debug_sql self.parallel = parallel self.tags = set(tags or []) self.exclude_tags = set(exclude_tags or []) self.pdb = pdb if self.pdb and self.parallel > 1: raise ValueError('You cannot use --pdb with parallel tests; pass --parallel=1 to use it.') self.buffer = buffer if self.buffer and self.parallel > 1: raise ValueError( 'You cannot use -b/--buffer with parallel tests; pass ' '--parallel=1 to use it.' ) self.test_name_patterns = None if test_name_patterns: # unittest does not export the _convert_select_pattern function # that converts command-line arguments to patterns. self.test_name_patterns = { pattern if '*' in pattern else '*%s*' % pattern for pattern in test_name_patterns } @classmethod def add_arguments(cls, parser): parser.add_argument( '-t', '--top-level-directory', dest='top_level', help='Top level of project for unittest discovery.', ) parser.add_argument( '-p', '--pattern', default="test*.py", help='The test matching pattern. Defaults to test*.py.', ) parser.add_argument( '--keepdb', action='store_true', help='Preserves the test DB between runs.' 
) parser.add_argument( '-r', '--reverse', action='store_true', help='Reverses test cases order.', ) parser.add_argument( '--debug-mode', action='store_true', help='Sets settings.DEBUG to True.', ) parser.add_argument( '-d', '--debug-sql', action='store_true', help='Prints logged SQL queries on failure.', ) parser.add_argument( '--parallel', nargs='?', default=1, type=int, const=default_test_processes(), metavar='N', help='Run tests using up to N parallel processes.', ) parser.add_argument( '--tag', action='append', dest='tags', help='Run only tests with the specified tag. Can be used multiple times.', ) parser.add_argument( '--exclude-tag', action='append', dest='exclude_tags', help='Do not run tests with the specified tag. Can be used multiple times.', ) parser.add_argument( '--pdb', action='store_true', help='Runs a debugger (pdb, or ipdb if installed) on error or failure.' ) parser.add_argument( '-b', '--buffer', action='store_true', help='Discard output from passing tests.', ) if PY37: parser.add_argument( '-k', action='append', dest='test_name_patterns', help=( 'Only run test methods and classes that match the pattern ' 'or substring. Can be used multiple times. Same as ' 'unittest -k option.' ), ) def setup_test_environment(self, **kwargs): setup_test_environment(debug=self.debug_mode) unittest.installHandler() def build_suite(self, test_labels=None, extra_tests=None, **kwargs): suite = self.test_suite() test_labels = test_labels or ['.'] extra_tests = extra_tests or [] self.test_loader.testNamePatterns = self.test_name_patterns discover_kwargs = {} if self.pattern is not None: discover_kwargs['pattern'] = self.pattern if self.top_level is not None: discover_kwargs['top_level_dir'] = self.top_level for label in test_labels: kwargs = discover_kwargs.copy() tests = None label_as_path = os.path.abspath(label) # if a module, or "module.ClassName[.method_name]", just run those if not os.path.exists(label_as_path): tests = self.test_loader.loadTestsFromName(label) elif os.path.isdir(label_as_path) and not self.top_level: # Try to be a bit smarter than unittest about finding the # default top-level for a given directory path, to avoid # breaking relative imports. (Unittest's default is to set # top-level equal to the path, which means relative imports # will result in "Attempted relative import in non-package."). # We'd be happy to skip this and require dotted module paths # (which don't cause this problem) instead of file paths (which # do), but in the case of a directory in the cwd, which would # be equally valid if considered as a top-level module or as a # directory path, unittest unfortunately prefers the latter. top_level = label_as_path while True: init_py = os.path.join(top_level, '__init__.py') if os.path.exists(init_py): try_next = os.path.dirname(top_level) if try_next == top_level: # __init__.py all the way down? give up. break top_level = try_next continue break kwargs['top_level_dir'] = top_level if not (tests and tests.countTestCases()) and is_discoverable(label): # Try discovery if path is a package or directory tests = self.test_loader.discover(start_dir=label, **kwargs) # Make unittest forget the top-level dir it calculated from this # run, to support running tests from two different top-levels. self.test_loader._top_level_dir = None suite.addTests(tests) for test in extra_tests: suite.addTest(test) if self.tags or self.exclude_tags: if self.verbosity >= 2: if self.tags: print('Including test tag(s): %s.' 
% ', '.join(sorted(self.tags))) if self.exclude_tags: print('Excluding test tag(s): %s.' % ', '.join(sorted(self.exclude_tags))) suite = filter_tests_by_tags(suite, self.tags, self.exclude_tags) suite = reorder_suite(suite, self.reorder_by, self.reverse) if self.parallel > 1: parallel_suite = self.parallel_test_suite(suite, self.parallel, self.failfast) # Since tests are distributed across processes on a per-TestCase # basis, there's no need for more processes than TestCases. parallel_units = len(parallel_suite.subsuites) self.parallel = min(self.parallel, parallel_units) # If there's only one TestCase, parallelization isn't needed. if self.parallel > 1: suite = parallel_suite return suite def setup_databases(self, **kwargs): return _setup_databases( self.verbosity, self.interactive, self.keepdb, self.debug_sql, self.parallel, **kwargs ) def get_resultclass(self): if self.debug_sql: return DebugSQLTextTestResult elif self.pdb: return PDBDebugResult def get_test_runner_kwargs(self): return { 'failfast': self.failfast, 'resultclass': self.get_resultclass(), 'verbosity': self.verbosity, 'buffer': self.buffer, } def run_checks(self, databases): # Checks are run after database creation since some checks require # database access. call_command('check', verbosity=self.verbosity, databases=databases) def run_suite(self, suite, **kwargs): kwargs = self.get_test_runner_kwargs() runner = self.test_runner(**kwargs) return runner.run(suite) def teardown_databases(self, old_config, **kwargs): """Destroy all the non-mirror databases.""" _teardown_databases( old_config, verbosity=self.verbosity, parallel=self.parallel, keepdb=self.keepdb, ) def teardown_test_environment(self, **kwargs): unittest.removeHandler() teardown_test_environment() def suite_result(self, suite, result, **kwargs): return len(result.failures) + len(result.errors) def _get_databases(self, suite): databases = set() for test in suite: if isinstance(test, unittest.TestCase): test_databases = getattr(test, 'databases', None) if test_databases == '__all__': return set(connections) if test_databases: databases.update(test_databases) else: databases.update(self._get_databases(test)) return databases def get_databases(self, suite): databases = self._get_databases(suite) if self.verbosity >= 2: unused_databases = [alias for alias in connections if alias not in databases] if unused_databases: print('Skipping setup of unused database(s): %s.' % ', '.join(sorted(unused_databases))) return databases def run_tests(self, test_labels, extra_tests=None, **kwargs): """ Run the unit tests for all the test labels in the provided list. Test labels should be dotted Python paths to test modules, test classes, or test methods. A list of 'extra' tests may also be provided; these tests will be added to the test suite. Return the number of tests that failed. """ self.setup_test_environment() suite = self.build_suite(test_labels, extra_tests) databases = self.get_databases(suite) old_config = self.setup_databases(aliases=databases) run_failed = False try: self.run_checks(databases) result = self.run_suite(suite) except Exception: run_failed = True raise finally: try: self.teardown_databases(old_config) self.teardown_test_environment() except Exception: # Silence teardown exceptions if an exception was raised during # runs to avoid shadowing it. if not run_failed: raise return self.suite_result(suite, result) def is_discoverable(label): """ Check if a test label points to a Python package or file directory. Relative labels like "." and ".." 
are seen as directories.
    """
    try:
        mod = import_module(label)
    except (ImportError, TypeError):
        pass
    else:
        return hasattr(mod, '__path__')

    return os.path.isdir(os.path.abspath(label))


def reorder_suite(suite, classes, reverse=False):
    """
    Reorder a test suite by test type.

    `classes` is a sequence of types. All tests of type classes[0] are placed
    first, then tests of type classes[1], etc. Tests with no match in classes
    are placed last.

    If `reverse` is True, sort tests within classes in opposite order but
    don't reverse test classes.
    """
    class_count = len(classes)
    suite_class = type(suite)
    bins = [OrderedSet() for i in range(class_count + 1)]
    partition_suite_by_type(suite, classes, bins, reverse=reverse)
    reordered_suite = suite_class()
    for i in range(class_count + 1):
        reordered_suite.addTests(bins[i])
    return reordered_suite


def partition_suite_by_type(suite, classes, bins, reverse=False):
    """
    Partition a test suite by test type. Also prevent duplicated tests.

    classes is a sequence of types;
    bins is a sequence of TestSuites, one more than classes;
    reverse changes the ordering of tests within bins.

    Tests of type classes[i] are added to bins[i]; tests with no match found
    in classes are placed in bins[-1].
    """
    suite_class = type(suite)
    if reverse:
        suite = reversed(tuple(suite))
    for test in suite:
        if isinstance(test, suite_class):
            partition_suite_by_type(test, classes, bins, reverse=reverse)
        else:
            for i in range(len(classes)):
                if isinstance(test, classes[i]):
                    bins[i].add(test)
                    break
            else:
                bins[-1].add(test)


def partition_suite_by_case(suite):
    """Partition a test suite by test case, preserving the order of tests."""
    groups = []
    suite_class = type(suite)
    for test_type, test_group in itertools.groupby(suite, type):
        if issubclass(test_type, unittest.TestCase):
            groups.append(suite_class(test_group))
        else:
            for item in test_group:
                groups.extend(partition_suite_by_case(item))
    return groups


def filter_tests_by_tags(suite, tags, exclude_tags):
    """Return the tests in suite that match tags and don't match exclude_tags."""
    suite_class = type(suite)
    filtered_suite = suite_class()

    for test in suite:
        if isinstance(test, suite_class):
            filtered_suite.addTests(filter_tests_by_tags(test, tags, exclude_tags))
        else:
            test_tags = set(getattr(test, 'tags', set()))
            test_fn_name = getattr(test, '_testMethodName', str(test))
            test_fn = getattr(test, test_fn_name, test)
            test_fn_tags = set(getattr(test_fn, 'tags', set()))
            all_tags = test_tags.union(test_fn_tags)
            matched_tags = all_tags.intersection(tags)
            if (matched_tags or not tags) and not all_tags.intersection(exclude_tags):
                filtered_suite.addTest(test)

    return filtered_suite
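# A minimal usage sketch (not part of Django itself): driving DiscoverRunner
# programmatically with tag filtering and parallel execution. Assumes
# DJANGO_SETTINGS_MODULE is configured; 'myapp.tests' is a hypothetical label.
#
#     import django
#     from django.test.runner import DiscoverRunner
#
#     django.setup()
#     runner = DiscoverRunner(verbosity=2, parallel=2, tags=['slow'])
#     failures = runner.run_tests(['myapp.tests'])
#     # run_tests() returns len(result.failures) + len(result.errors).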
import json import mimetypes import os import sys from copy import copy from functools import partial from http import HTTPStatus from importlib import import_module from io import BytesIO from urllib.parse import unquote_to_bytes, urljoin, urlparse, urlsplit from asgiref.sync import sync_to_async from django.conf import settings from django.core.handlers.asgi import ASGIRequest from django.core.handlers.base import BaseHandler from django.core.handlers.wsgi import WSGIRequest from django.core.serializers.json import DjangoJSONEncoder from django.core.signals import ( got_request_exception, request_finished, request_started, ) from django.db import close_old_connections from django.http import HttpRequest, QueryDict, SimpleCookie from django.test import signals from django.test.utils import ContextList from django.urls import resolve from django.utils.encoding import force_bytes from django.utils.functional import SimpleLazyObject from django.utils.http import urlencode from django.utils.itercompat import is_iterable from django.utils.regex_helper import _lazy_re_compile __all__ = ('Client', 'RedirectCycleError', 'RequestFactory', 'encode_file', 'encode_multipart') BOUNDARY = 'BoUnDaRyStRiNg' MULTIPART_CONTENT = 'multipart/form-data; boundary=%s' % BOUNDARY CONTENT_TYPE_RE = _lazy_re_compile(r'.*; charset=([\w\d-]+);?') # Structured suffix spec: https://tools.ietf.org/html/rfc6838#section-4.2.8 JSON_CONTENT_TYPE_RE = _lazy_re_compile(r'^application\/(.+\+)?json') class RedirectCycleError(Exception): """The test client has been asked to follow a redirect loop.""" def __init__(self, message, last_response): super().__init__(message) self.last_response = last_response self.redirect_chain = last_response.redirect_chain class FakePayload: """ A wrapper around BytesIO that restricts what can be read since data from the network can't be sought and cannot be read outside of its content length. This makes sure that views can't do anything under the test client that wouldn't work in real life. """ def __init__(self, content=None): self.__content = BytesIO() self.__len = 0 self.read_started = False if content is not None: self.write(content) def __len__(self): return self.__len def read(self, num_bytes=None): if not self.read_started: self.__content.seek(0) self.read_started = True if num_bytes is None: num_bytes = self.__len or 0 assert self.__len >= num_bytes, "Cannot read more than the available bytes from the HTTP incoming data." content = self.__content.read(num_bytes) self.__len -= num_bytes return content def write(self, content): if self.read_started: raise ValueError("Unable to write a payload after it's been read") content = force_bytes(content) self.__content.write(content) self.__len += len(content) def closing_iterator_wrapper(iterable, close): try: yield from iterable finally: request_finished.disconnect(close_old_connections) close() # will fire request_finished request_finished.connect(close_old_connections) def conditional_content_removal(request, response): """ Simulate the behavior of most Web servers by removing the content of responses for HEAD requests, 1xx, 204, and 304 responses. Ensure compliance with RFC 7230, section 3.3.3. 
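    For example, any response to a HEAD request, and any 1xx, 204, or 304
    response, is returned with an empty body (or empty streaming content).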
""" if 100 <= response.status_code < 200 or response.status_code in (204, 304): if response.streaming: response.streaming_content = [] else: response.content = b'' if request.method == 'HEAD': if response.streaming: response.streaming_content = [] else: response.content = b'' return response class ClientHandler(BaseHandler): """ A HTTP Handler that can be used for testing purposes. Use the WSGI interface to compose requests, but return the raw HttpResponse object with the originating WSGIRequest attached to its ``wsgi_request`` attribute. """ def __init__(self, enforce_csrf_checks=True, *args, **kwargs): self.enforce_csrf_checks = enforce_csrf_checks super().__init__(*args, **kwargs) def __call__(self, environ): # Set up middleware if needed. We couldn't do this earlier, because # settings weren't available. if self._middleware_chain is None: self.load_middleware() request_started.disconnect(close_old_connections) request_started.send(sender=self.__class__, environ=environ) request_started.connect(close_old_connections) request = WSGIRequest(environ) # sneaky little hack so that we can easily get round # CsrfViewMiddleware. This makes life easier, and is probably # required for backwards compatibility with external tests against # admin views. request._dont_enforce_csrf_checks = not self.enforce_csrf_checks # Request goes through middleware. response = self.get_response(request) # Simulate behaviors of most Web servers. conditional_content_removal(request, response) # Attach the originating request to the response so that it could be # later retrieved. response.wsgi_request = request # Emulate a WSGI server by calling the close method on completion. if response.streaming: response.streaming_content = closing_iterator_wrapper( response.streaming_content, response.close) else: request_finished.disconnect(close_old_connections) response.close() # will fire request_finished request_finished.connect(close_old_connections) return response class AsyncClientHandler(BaseHandler): """An async version of ClientHandler.""" def __init__(self, enforce_csrf_checks=True, *args, **kwargs): self.enforce_csrf_checks = enforce_csrf_checks super().__init__(*args, **kwargs) async def __call__(self, scope): # Set up middleware if needed. We couldn't do this earlier, because # settings weren't available. if self._middleware_chain is None: self.load_middleware(is_async=True) # Extract body file from the scope, if provided. if '_body_file' in scope: body_file = scope.pop('_body_file') else: body_file = FakePayload('') request_started.disconnect(close_old_connections) await sync_to_async(request_started.send)(sender=self.__class__, scope=scope) request_started.connect(close_old_connections) request = ASGIRequest(scope, body_file) # Sneaky little hack so that we can easily get round # CsrfViewMiddleware. This makes life easier, and is probably required # for backwards compatibility with external tests against admin views. request._dont_enforce_csrf_checks = not self.enforce_csrf_checks # Request goes through middleware. response = await self.get_response_async(request) # Simulate behaviors of most Web servers. conditional_content_removal(request, response) # Attach the originating ASGI request to the response so that it could # be later retrieved. response.asgi_request = request # Emulate a server by calling the close method on completion. 
if response.streaming: response.streaming_content = await sync_to_async(closing_iterator_wrapper)( response.streaming_content, response.close, ) else: request_finished.disconnect(close_old_connections) # Will fire request_finished. await sync_to_async(response.close)() request_finished.connect(close_old_connections) return response def store_rendered_templates(store, signal, sender, template, context, **kwargs): """ Store templates and contexts that are rendered. The context is copied so that it is an accurate representation at the time of rendering. """ store.setdefault('templates', []).append(template) if 'context' not in store: store['context'] = ContextList() store['context'].append(copy(context)) def encode_multipart(boundary, data): """ Encode multipart POST data from a dictionary of form values. The key will be used as the form data name; the value will be transmitted as content. If the value is a file, the contents of the file will be sent as an application/octet-stream; otherwise, str(value) will be sent. """ lines = [] def to_bytes(s): return force_bytes(s, settings.DEFAULT_CHARSET) # Not by any means perfect, but good enough for our purposes. def is_file(thing): return hasattr(thing, "read") and callable(thing.read) # Each bit of the multipart form data could be either a form value or a # file, or a *list* of form values and/or files. Remember that HTTP field # names can be duplicated! for (key, value) in data.items(): if value is None: raise TypeError( "Cannot encode None for key '%s' as POST data. Did you mean " "to pass an empty string or omit the value?" % key ) elif is_file(value): lines.extend(encode_file(boundary, key, value)) elif not isinstance(value, str) and is_iterable(value): for item in value: if is_file(item): lines.extend(encode_file(boundary, key, item)) else: lines.extend(to_bytes(val) for val in [ '--%s' % boundary, 'Content-Disposition: form-data; name="%s"' % key, '', item ]) else: lines.extend(to_bytes(val) for val in [ '--%s' % boundary, 'Content-Disposition: form-data; name="%s"' % key, '', value ]) lines.extend([ to_bytes('--%s--' % boundary), b'', ]) return b'\r\n'.join(lines) def encode_file(boundary, key, file): def to_bytes(s): return force_bytes(s, settings.DEFAULT_CHARSET) # file.name might not be a string. For example, it's an int for # tempfile.TemporaryFile(). file_has_string_name = hasattr(file, 'name') and isinstance(file.name, str) filename = os.path.basename(file.name) if file_has_string_name else '' if hasattr(file, 'content_type'): content_type = file.content_type elif filename: content_type = mimetypes.guess_type(filename)[0] else: content_type = None if content_type is None: content_type = 'application/octet-stream' filename = filename or key return [ to_bytes('--%s' % boundary), to_bytes('Content-Disposition: form-data; name="%s"; filename="%s"' % (key, filename)), to_bytes('Content-Type: %s' % content_type), b'', to_bytes(file.read()) ] class RequestFactory: """ Class that lets you create mock Request objects for use in testing. Usage: rf = RequestFactory() get_request = rf.get('/hello/') post_request = rf.post('/submit/', {'foo': 'bar'}) Once you have a request object you can pass it to any view function, just as if that view had been hooked up using a URLconf. """ def __init__(self, *, json_encoder=DjangoJSONEncoder, **defaults): self.json_encoder = json_encoder self.defaults = defaults self.cookies = SimpleCookie() self.errors = BytesIO() def _base_environ(self, **request): """ The base environment for a request. 
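        Entries from ``self.defaults`` and then from ``request`` override
        these base values.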
""" # This is a minimal valid WSGI environ dictionary, plus: # - HTTP_COOKIE: for cookie support, # - REMOTE_ADDR: often useful, see #8551. # See https://www.python.org/dev/peps/pep-3333/#environ-variables return { 'HTTP_COOKIE': '; '.join(sorted( '%s=%s' % (morsel.key, morsel.coded_value) for morsel in self.cookies.values() )), 'PATH_INFO': '/', 'REMOTE_ADDR': '127.0.0.1', 'REQUEST_METHOD': 'GET', 'SCRIPT_NAME': '', 'SERVER_NAME': 'testserver', 'SERVER_PORT': '80', 'SERVER_PROTOCOL': 'HTTP/1.1', 'wsgi.version': (1, 0), 'wsgi.url_scheme': 'http', 'wsgi.input': FakePayload(b''), 'wsgi.errors': self.errors, 'wsgi.multiprocess': True, 'wsgi.multithread': False, 'wsgi.run_once': False, **self.defaults, **request, } def request(self, **request): "Construct a generic request object." return WSGIRequest(self._base_environ(**request)) def _encode_data(self, data, content_type): if content_type is MULTIPART_CONTENT: return encode_multipart(BOUNDARY, data) else: # Encode the content so that the byte representation is correct. match = CONTENT_TYPE_RE.match(content_type) if match: charset = match.group(1) else: charset = settings.DEFAULT_CHARSET return force_bytes(data, encoding=charset) def _encode_json(self, data, content_type): """ Return encoded JSON if data is a dict, list, or tuple and content_type is application/json. """ should_encode = JSON_CONTENT_TYPE_RE.match(content_type) and isinstance(data, (dict, list, tuple)) return json.dumps(data, cls=self.json_encoder) if should_encode else data def _get_path(self, parsed): path = parsed.path # If there are parameters, add them if parsed.params: path += ";" + parsed.params path = unquote_to_bytes(path) # Replace the behavior where non-ASCII values in the WSGI environ are # arbitrarily decoded with ISO-8859-1. # Refs comment in `get_bytes_from_wsgi()`. return path.decode('iso-8859-1') def get(self, path, data=None, secure=False, **extra): """Construct a GET request.""" data = {} if data is None else data return self.generic('GET', path, secure=secure, **{ 'QUERY_STRING': urlencode(data, doseq=True), **extra, }) def post(self, path, data=None, content_type=MULTIPART_CONTENT, secure=False, **extra): """Construct a POST request.""" data = self._encode_json({} if data is None else data, content_type) post_data = self._encode_data(data, content_type) return self.generic('POST', path, post_data, content_type, secure=secure, **extra) def head(self, path, data=None, secure=False, **extra): """Construct a HEAD request.""" data = {} if data is None else data return self.generic('HEAD', path, secure=secure, **{ 'QUERY_STRING': urlencode(data, doseq=True), **extra, }) def trace(self, path, secure=False, **extra): """Construct a TRACE request.""" return self.generic('TRACE', path, secure=secure, **extra) def options(self, path, data='', content_type='application/octet-stream', secure=False, **extra): "Construct an OPTIONS request." 
return self.generic('OPTIONS', path, data, content_type, secure=secure, **extra) def put(self, path, data='', content_type='application/octet-stream', secure=False, **extra): """Construct a PUT request.""" data = self._encode_json(data, content_type) return self.generic('PUT', path, data, content_type, secure=secure, **extra) def patch(self, path, data='', content_type='application/octet-stream', secure=False, **extra): """Construct a PATCH request.""" data = self._encode_json(data, content_type) return self.generic('PATCH', path, data, content_type, secure=secure, **extra) def delete(self, path, data='', content_type='application/octet-stream', secure=False, **extra): """Construct a DELETE request.""" data = self._encode_json(data, content_type) return self.generic('DELETE', path, data, content_type, secure=secure, **extra) def generic(self, method, path, data='', content_type='application/octet-stream', secure=False, **extra): """Construct an arbitrary HTTP request.""" parsed = urlparse(str(path)) # path can be lazy data = force_bytes(data, settings.DEFAULT_CHARSET) r = { 'PATH_INFO': self._get_path(parsed), 'REQUEST_METHOD': method, 'SERVER_PORT': '443' if secure else '80', 'wsgi.url_scheme': 'https' if secure else 'http', } if data: r.update({ 'CONTENT_LENGTH': str(len(data)), 'CONTENT_TYPE': content_type, 'wsgi.input': FakePayload(data), }) r.update(extra) # If QUERY_STRING is absent or empty, we want to extract it from the URL. if not r.get('QUERY_STRING'): # WSGI requires latin-1 encoded strings. See get_path_info(). query_string = parsed[4].encode().decode('iso-8859-1') r['QUERY_STRING'] = query_string return self.request(**r) class AsyncRequestFactory(RequestFactory): """ Class that lets you create mock ASGI-like Request objects for use in testing. Usage: rf = AsyncRequestFactory() get_request = await rf.get('/hello/') post_request = await rf.post('/submit/', {'foo': 'bar'}) Once you have a request object you can pass it to any view function, including synchronous ones. The reason we have a separate class here is: a) this makes ASGIRequest subclasses, and b) AsyncTestClient can subclass it. """ def _base_scope(self, **request): """The base scope for a request.""" # This is a minimal valid ASGI scope, plus: # - headers['cookie'] for cookie support, # - 'client' often useful, see #8551. scope = { 'asgi': {'version': '3.0'}, 'type': 'http', 'http_version': '1.1', 'client': ['127.0.0.1', 0], 'server': ('testserver', '80'), 'scheme': 'http', 'method': 'GET', 'headers': [], **self.defaults, **request, } scope['headers'].append(( b'cookie', b'; '.join(sorted( ('%s=%s' % (morsel.key, morsel.coded_value)).encode('ascii') for morsel in self.cookies.values() )), )) return scope def request(self, **request): """Construct a generic request object.""" # This is synchronous, which means all methods on this class are. # AsyncClient, however, has an async request function, which makes all # its methods async. if '_body_file' in request: body_file = request.pop('_body_file') else: body_file = FakePayload('') return ASGIRequest(self._base_scope(**request), body_file) def generic( self, method, path, data='', content_type='application/octet-stream', secure=False, **extra, ): """Construct an arbitrary HTTP request.""" parsed = urlparse(str(path)) # path can be lazy. 
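        # Build a minimal ASGI HTTP scope mirroring what _base_environ does
        # for WSGI: method, path, server, scheme, and a host header.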
        data = force_bytes(data, settings.DEFAULT_CHARSET)
        s = {
            'method': method,
            'path': self._get_path(parsed),
            'server': ('127.0.0.1', '443' if secure else '80'),
            'scheme': 'https' if secure else 'http',
            'headers': [(b'host', b'testserver')],
        }
        if data:
            s['headers'].extend([
                # Content-Length must be the decimal byte count encoded as
                # ASCII; bytes(len(data)) would instead yield NUL bytes.
                (b'content-length', str(len(data)).encode('ascii')),
                (b'content-type', content_type.encode('ascii')),
            ])
            s['_body_file'] = FakePayload(data)
        s.update(extra)
        # If QUERY_STRING is absent or empty, we want to extract it from the
        # URL.
        if not s.get('query_string'):
            s['query_string'] = parsed[4]
        return self.request(**s)


class ClientMixin:
    """
    Mixin with common methods between Client and AsyncClient.
    """
    def store_exc_info(self, **kwargs):
        """Store exceptions when they are generated by a view."""
        self.exc_info = sys.exc_info()

    def check_exception(self, response):
        """
        Look for a signaled exception, clear the current context exception
        data, re-raise the signaled exception, and clear the signaled
        exception from the local cache.
        """
        response.exc_info = self.exc_info
        if self.exc_info:
            _, exc_value, _ = self.exc_info
            self.exc_info = None
            if self.raise_request_exception:
                raise exc_value

    @property
    def session(self):
        """Return the current session variables."""
        engine = import_module(settings.SESSION_ENGINE)
        cookie = self.cookies.get(settings.SESSION_COOKIE_NAME)
        if cookie:
            return engine.SessionStore(cookie.value)
        session = engine.SessionStore()
        session.save()
        self.cookies[settings.SESSION_COOKIE_NAME] = session.session_key
        return session

    def login(self, **credentials):
        """
        Set the Factory to appear as if it has successfully logged into a site.

        Return True if login is possible or False if the provided credentials
        are incorrect.
        """
        from django.contrib.auth import authenticate
        user = authenticate(**credentials)
        if user:
            self._login(user)
            return True
        return False

    def force_login(self, user, backend=None):
        def get_backend():
            from django.contrib.auth import load_backend
            for backend_path in settings.AUTHENTICATION_BACKENDS:
                backend = load_backend(backend_path)
                if hasattr(backend, 'get_user'):
                    return backend_path

        if backend is None:
            backend = get_backend()
        user.backend = backend
        self._login(user, backend)

    def _login(self, user, backend=None):
        from django.contrib.auth import login
        # Create a fake request to store login details.
        request = HttpRequest()
        if self.session:
            request.session = self.session
        else:
            engine = import_module(settings.SESSION_ENGINE)
            request.session = engine.SessionStore()
        login(request, user, backend)
        # Save the session values.
        request.session.save()
        # Set the cookie to represent the session.
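        # The cookie attributes below mirror the settings-derived values
        # SessionMiddleware would set, so follow-up requests made through
        # this client are treated as authenticated.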
session_cookie = settings.SESSION_COOKIE_NAME self.cookies[session_cookie] = request.session.session_key cookie_data = { 'max-age': None, 'path': '/', 'domain': settings.SESSION_COOKIE_DOMAIN, 'secure': settings.SESSION_COOKIE_SECURE or None, 'expires': None, } self.cookies[session_cookie].update(cookie_data) def logout(self): """Log out the user by removing the cookies and session object.""" from django.contrib.auth import get_user, logout request = HttpRequest() if self.session: request.session = self.session request.user = get_user(request) else: engine = import_module(settings.SESSION_ENGINE) request.session = engine.SessionStore() logout(request) self.cookies = SimpleCookie() def _parse_json(self, response, **extra): if not hasattr(response, '_json'): if not JSON_CONTENT_TYPE_RE.match(response.get('Content-Type')): raise ValueError( 'Content-Type header is "%s", not "application/json"' % response.get('Content-Type') ) response._json = json.loads(response.content.decode(response.charset), **extra) return response._json class Client(ClientMixin, RequestFactory): """ A class that can act as a client for testing purposes. It allows the user to compose GET and POST requests, and obtain the response that the server gave to those requests. The server Response objects are annotated with the details of the contexts and templates that were rendered during the process of serving the request. Client objects are stateful - they will retain cookie (and thus session) details for the lifetime of the Client instance. This is not intended as a replacement for Twill/Selenium or the like - it is here to allow testing against the contexts and templates produced by a view, rather than the HTML rendered to the end-user. """ def __init__(self, enforce_csrf_checks=False, raise_request_exception=True, **defaults): super().__init__(**defaults) self.handler = ClientHandler(enforce_csrf_checks) self.raise_request_exception = raise_request_exception self.exc_info = None self.extra = None def request(self, **request): """ The master request method. Compose the environment dictionary and pass to the handler, return the result of the handler. Assume defaults for the query environment, which can be overridden using the arguments to the request. """ environ = self._base_environ(**request) # Curry a data dictionary into an instance of the template renderer # callback function. data = {} on_template_render = partial(store_rendered_templates, data) signal_uid = "template-render-%s" % id(request) signals.template_rendered.connect(on_template_render, dispatch_uid=signal_uid) # Capture exceptions created by the handler. exception_uid = "request-exception-%s" % id(request) got_request_exception.connect(self.store_exc_info, dispatch_uid=exception_uid) try: response = self.handler(environ) finally: signals.template_rendered.disconnect(dispatch_uid=signal_uid) got_request_exception.disconnect(dispatch_uid=exception_uid) # Check for signaled exceptions. self.check_exception(response) # Save the client and request that stimulated the response. response.client = self response.request = request # Add any rendered template detail to the response. response.templates = data.get('templates', []) response.context = data.get('context') response.json = partial(self._parse_json, response) # Attach the ResolverMatch instance to the response. response.resolver_match = SimpleLazyObject(lambda: resolve(request['PATH_INFO'])) # Flatten a single context. 
Not really necessary anymore thanks to the # __getattr__ flattening in ContextList, but has some edge case # backwards compatibility implications. if response.context and len(response.context) == 1: response.context = response.context[0] # Update persistent cookie data. if response.cookies: self.cookies.update(response.cookies) return response def get(self, path, data=None, follow=False, secure=False, **extra): """Request a response from the server using GET.""" self.extra = extra response = super().get(path, data=data, secure=secure, **extra) if follow: response = self._handle_redirects(response, data=data, **extra) return response def post(self, path, data=None, content_type=MULTIPART_CONTENT, follow=False, secure=False, **extra): """Request a response from the server using POST.""" self.extra = extra response = super().post(path, data=data, content_type=content_type, secure=secure, **extra) if follow: response = self._handle_redirects(response, data=data, content_type=content_type, **extra) return response def head(self, path, data=None, follow=False, secure=False, **extra): """Request a response from the server using HEAD.""" self.extra = extra response = super().head(path, data=data, secure=secure, **extra) if follow: response = self._handle_redirects(response, data=data, **extra) return response def options(self, path, data='', content_type='application/octet-stream', follow=False, secure=False, **extra): """Request a response from the server using OPTIONS.""" self.extra = extra response = super().options(path, data=data, content_type=content_type, secure=secure, **extra) if follow: response = self._handle_redirects(response, data=data, content_type=content_type, **extra) return response def put(self, path, data='', content_type='application/octet-stream', follow=False, secure=False, **extra): """Send a resource to the server using PUT.""" self.extra = extra response = super().put(path, data=data, content_type=content_type, secure=secure, **extra) if follow: response = self._handle_redirects(response, data=data, content_type=content_type, **extra) return response def patch(self, path, data='', content_type='application/octet-stream', follow=False, secure=False, **extra): """Send a resource to the server using PATCH.""" self.extra = extra response = super().patch(path, data=data, content_type=content_type, secure=secure, **extra) if follow: response = self._handle_redirects(response, data=data, content_type=content_type, **extra) return response def delete(self, path, data='', content_type='application/octet-stream', follow=False, secure=False, **extra): """Send a DELETE request to the server.""" self.extra = extra response = super().delete(path, data=data, content_type=content_type, secure=secure, **extra) if follow: response = self._handle_redirects(response, data=data, content_type=content_type, **extra) return response def trace(self, path, data='', follow=False, secure=False, **extra): """Send a TRACE request to the server.""" self.extra = extra response = super().trace(path, data=data, secure=secure, **extra) if follow: response = self._handle_redirects(response, data=data, **extra) return response def _handle_redirects(self, response, data='', content_type='', **extra): """ Follow any redirects by requesting responses from the server using GET. 
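        Responses with a 307 or 308 status are re-requested with the original
        method, body, and content type; all other redirect statuses are
        followed with a GET, matching common browser behavior.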
""" response.redirect_chain = [] redirect_status_codes = ( HTTPStatus.MOVED_PERMANENTLY, HTTPStatus.FOUND, HTTPStatus.SEE_OTHER, HTTPStatus.TEMPORARY_REDIRECT, HTTPStatus.PERMANENT_REDIRECT, ) while response.status_code in redirect_status_codes: response_url = response.url redirect_chain = response.redirect_chain redirect_chain.append((response_url, response.status_code)) url = urlsplit(response_url) if url.scheme: extra['wsgi.url_scheme'] = url.scheme if url.hostname: extra['SERVER_NAME'] = url.hostname if url.port: extra['SERVER_PORT'] = str(url.port) # Prepend the request path to handle relative path redirects path = url.path if not path.startswith('/'): path = urljoin(response.request['PATH_INFO'], path) if response.status_code in (HTTPStatus.TEMPORARY_REDIRECT, HTTPStatus.PERMANENT_REDIRECT): # Preserve request method post-redirect for 307/308 responses. request_method = getattr(self, response.request['REQUEST_METHOD'].lower()) else: request_method = self.get data = QueryDict(url.query) content_type = None response = request_method(path, data=data, content_type=content_type, follow=False, **extra) response.redirect_chain = redirect_chain if redirect_chain[-1] in redirect_chain[:-1]: # Check that we're not redirecting to somewhere we've already # been to, to prevent loops. raise RedirectCycleError("Redirect loop detected.", last_response=response) if len(redirect_chain) > 20: # Such a lengthy chain likely also means a loop, but one with # a growing path, changing view, or changing query argument; # 20 is the value of "network.http.redirection-limit" from Firefox. raise RedirectCycleError("Too many redirects.", last_response=response) return response class AsyncClient(ClientMixin, AsyncRequestFactory): """ An async version of Client that creates ASGIRequests and calls through an async request path. Does not currently support "follow" on its methods. """ def __init__(self, enforce_csrf_checks=False, raise_request_exception=True, **defaults): super().__init__(**defaults) self.handler = AsyncClientHandler(enforce_csrf_checks) self.raise_request_exception = raise_request_exception self.exc_info = None self.extra = None async def request(self, **request): """ The master request method. Compose the scope dictionary and pass to the handler, return the result of the handler. Assume defaults for the query environment, which can be overridden using the arguments to the request. """ if 'follow' in request: raise NotImplementedError( 'AsyncClient request methods do not accept the follow ' 'parameter.' ) scope = self._base_scope(**request) # Curry a data dictionary into an instance of the template renderer # callback function. data = {} on_template_render = partial(store_rendered_templates, data) signal_uid = 'template-render-%s' % id(request) signals.template_rendered.connect(on_template_render, dispatch_uid=signal_uid) # Capture exceptions created by the handler. exception_uid = 'request-exception-%s' % id(request) got_request_exception.connect(self.store_exc_info, dispatch_uid=exception_uid) try: response = await self.handler(scope) finally: signals.template_rendered.disconnect(dispatch_uid=signal_uid) got_request_exception.disconnect(dispatch_uid=exception_uid) # Check for signaled exceptions. self.check_exception(response) # Save the client and request that stimulated the response. response.client = self response.request = request # Add any rendered template detail to the response. 
response.templates = data.get('templates', []) response.context = data.get('context') response.json = partial(self._parse_json, response) # Attach the ResolverMatch instance to the response. response.resolver_match = SimpleLazyObject(lambda: resolve(request['path'])) # Flatten a single context. Not really necessary anymore thanks to the # __getattr__ flattening in ContextList, but has some edge case # backwards compatibility implications. if response.context and len(response.context) == 1: response.context = response.context[0] # Update persistent cookie data. if response.cookies: self.cookies.update(response.cookies) return response
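# A minimal usage sketch (hypothetical URLs; normally written inside a
# django.test.TestCase, which provides self.client):
#
#     from django.test import Client
#
#     c = Client()
#     response = c.post('/login/', {'username': 'jane', 'password': 's3kr3t'})
#     assert response.status_code in (200, 302)
#     response = c.get('/api/items/', HTTP_ACCEPT='application/json')
#     data = response.json()  # parses the body if the view returned JSON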
import asyncio import difflib import json import posixpath import sys import threading import unittest from collections import Counter from contextlib import contextmanager from copy import copy from difflib import get_close_matches from functools import wraps from unittest.suite import _DebugResult from unittest.util import safe_repr from urllib.parse import ( parse_qsl, unquote, urlencode, urljoin, urlparse, urlsplit, urlunparse, ) from urllib.request import url2pathname from asgiref.sync import async_to_sync from django.apps import apps from django.conf import settings from django.core import mail from django.core.exceptions import ImproperlyConfigured, ValidationError from django.core.files import locks from django.core.handlers.wsgi import WSGIHandler, get_path_info from django.core.management import call_command from django.core.management.color import no_style from django.core.management.sql import emit_post_migrate_signal from django.core.servers.basehttp import ThreadedWSGIServer, WSGIRequestHandler from django.db import DEFAULT_DB_ALIAS, connection, connections, transaction from django.forms.fields import CharField from django.http import QueryDict from django.http.request import split_domain_port, validate_host from django.test.client import AsyncClient, Client from django.test.html import HTMLParseError, parse_html from django.test.signals import setting_changed, template_rendered from django.test.utils import ( CaptureQueriesContext, ContextList, compare_xml, modify_settings, override_settings, ) from django.utils.functional import classproperty from django.views.static import serve __all__ = ('TestCase', 'TransactionTestCase', 'SimpleTestCase', 'skipIfDBFeature', 'skipUnlessDBFeature') def to_list(value): """ Put value into a list if it's not already one. Return an empty list if value is None. """ if value is None: value = [] elif not isinstance(value, list): value = [value] return value def assert_and_parse_html(self, html, user_msg, msg): try: dom = parse_html(html) except HTMLParseError as e: standardMsg = '%s\n%s' % (msg, e) self.fail(self._formatMessage(user_msg, standardMsg)) return dom class _AssertNumQueriesContext(CaptureQueriesContext): def __init__(self, test_case, num, connection): self.test_case = test_case self.num = num super().__init__(connection) def __exit__(self, exc_type, exc_value, traceback): super().__exit__(exc_type, exc_value, traceback) if exc_type is not None: return executed = len(self) self.test_case.assertEqual( executed, self.num, "%d queries executed, %d expected\nCaptured queries were:\n%s" % ( executed, self.num, '\n'.join( '%d. %s' % (i, query['sql']) for i, query in enumerate(self.captured_queries, start=1) ) ) ) class _AssertTemplateUsedContext: def __init__(self, test_case, template_name): self.test_case = test_case self.template_name = template_name self.rendered_templates = [] self.rendered_template_names = [] self.context = ContextList() def on_template_render(self, sender, signal, template, context, **kwargs): self.rendered_templates.append(template) self.rendered_template_names.append(template.name) self.context.append(copy(context)) def test(self): return self.template_name in self.rendered_template_names def message(self): return '%s was not rendered.' 
% self.template_name def __enter__(self): template_rendered.connect(self.on_template_render) return self def __exit__(self, exc_type, exc_value, traceback): template_rendered.disconnect(self.on_template_render) if exc_type is not None: return if not self.test(): message = self.message() if self.rendered_templates: message += ' Following templates were rendered: %s' % ( ', '.join(self.rendered_template_names) ) else: message += ' No template was rendered.' self.test_case.fail(message) class _AssertTemplateNotUsedContext(_AssertTemplateUsedContext): def test(self): return self.template_name not in self.rendered_template_names def message(self): return '%s was rendered.' % self.template_name class _DatabaseFailure: def __init__(self, wrapped, message): self.wrapped = wrapped self.message = message def __call__(self): raise AssertionError(self.message) class SimpleTestCase(unittest.TestCase): # The class we'll use for the test client self.client. # Can be overridden in derived classes. client_class = Client async_client_class = AsyncClient _overridden_settings = None _modified_settings = None databases = set() _disallowed_database_msg = ( 'Database %(operation)s to %(alias)r are not allowed in SimpleTestCase ' 'subclasses. Either subclass TestCase or TransactionTestCase to ensure ' 'proper test isolation or add %(alias)r to %(test)s.databases to silence ' 'this failure.' ) _disallowed_connection_methods = [ ('connect', 'connections'), ('temporary_connection', 'connections'), ('cursor', 'queries'), ('chunked_cursor', 'queries'), ] @classmethod def setUpClass(cls): super().setUpClass() if cls._overridden_settings: cls._cls_overridden_context = override_settings(**cls._overridden_settings) cls._cls_overridden_context.enable() if cls._modified_settings: cls._cls_modified_context = modify_settings(cls._modified_settings) cls._cls_modified_context.enable() cls._add_databases_failures() @classmethod def _validate_databases(cls): if cls.databases == '__all__': return frozenset(connections) for alias in cls.databases: if alias not in connections: message = '%s.%s.databases refers to %r which is not defined in settings.DATABASES.' % ( cls.__module__, cls.__qualname__, alias, ) close_matches = get_close_matches(alias, list(connections)) if close_matches: message += ' Did you mean %r?' 
% close_matches[0] raise ImproperlyConfigured(message) return frozenset(cls.databases) @classmethod def _add_databases_failures(cls): cls.databases = cls._validate_databases() for alias in connections: if alias in cls.databases: continue connection = connections[alias] for name, operation in cls._disallowed_connection_methods: message = cls._disallowed_database_msg % { 'test': '%s.%s' % (cls.__module__, cls.__qualname__), 'alias': alias, 'operation': operation, } method = getattr(connection, name) setattr(connection, name, _DatabaseFailure(method, message)) @classmethod def _remove_databases_failures(cls): for alias in connections: if alias in cls.databases: continue connection = connections[alias] for name, _ in cls._disallowed_connection_methods: method = getattr(connection, name) setattr(connection, name, method.wrapped) @classmethod def tearDownClass(cls): cls._remove_databases_failures() if hasattr(cls, '_cls_modified_context'): cls._cls_modified_context.disable() delattr(cls, '_cls_modified_context') if hasattr(cls, '_cls_overridden_context'): cls._cls_overridden_context.disable() delattr(cls, '_cls_overridden_context') super().tearDownClass() def __call__(self, result=None): """ Wrapper around default __call__ method to perform common Django test set up. This means that user-defined Test Cases aren't required to include a call to super().setUp(). """ self._setup_and_call(result) def debug(self): """Perform the same as __call__(), without catching the exception.""" debug_result = _DebugResult() self._setup_and_call(debug_result, debug=True) def _setup_and_call(self, result, debug=False): """ Perform the following in order: pre-setup, run test, post-teardown, skipping pre/post hooks if test is set to be skipped. If debug=True, reraise any errors in setup and use super().debug() instead of __call__() to run the test. """ testMethod = getattr(self, self._testMethodName) skipped = ( getattr(self.__class__, "__unittest_skip__", False) or getattr(testMethod, "__unittest_skip__", False) ) # Convert async test methods. if asyncio.iscoroutinefunction(testMethod): setattr(self, self._testMethodName, async_to_sync(testMethod)) if not skipped: try: self._pre_setup() except Exception: if debug: raise result.addError(self, sys.exc_info()) return if debug: super().debug() else: super().__call__(result) if not skipped: try: self._post_teardown() except Exception: if debug: raise result.addError(self, sys.exc_info()) return def _pre_setup(self): """ Perform pre-test setup: * Create a test client. * Clear the mail test outbox. """ self.client = self.client_class() self.async_client = self.async_client_class() mail.outbox = [] def _post_teardown(self): """Perform post-test things.""" pass def settings(self, **kwargs): """ A context manager that temporarily sets a setting and reverts to the original value when exiting the context. """ return override_settings(**kwargs) def modify_settings(self, **kwargs): """ A context manager that temporarily applies changes a list setting and reverts back to the original value when exiting the context. """ return modify_settings(**kwargs) def assertRedirects(self, response, expected_url, status_code=302, target_status_code=200, msg_prefix='', fetch_redirect_response=True): """ Assert that a response redirected to a specific URL and that the redirect URL can be loaded. Won't work for external links since it uses the test client to do a request (use fetch_redirect_response=False to check such links without fetching them). 
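        A minimal usage sketch (hypothetical URLs):

            response = self.client.get('/accounts/profile/')
            self.assertRedirects(response, '/login/?next=/accounts/profile/')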
""" if msg_prefix: msg_prefix += ": " if hasattr(response, 'redirect_chain'): # The request was a followed redirect self.assertTrue( response.redirect_chain, msg_prefix + "Response didn't redirect as expected: Response code was %d (expected %d)" % (response.status_code, status_code) ) self.assertEqual( response.redirect_chain[0][1], status_code, msg_prefix + "Initial response didn't redirect as expected: Response code was %d (expected %d)" % (response.redirect_chain[0][1], status_code) ) url, status_code = response.redirect_chain[-1] scheme, netloc, path, query, fragment = urlsplit(url) self.assertEqual( response.status_code, target_status_code, msg_prefix + "Response didn't redirect as expected: Final Response code was %d (expected %d)" % (response.status_code, target_status_code) ) else: # Not a followed redirect self.assertEqual( response.status_code, status_code, msg_prefix + "Response didn't redirect as expected: Response code was %d (expected %d)" % (response.status_code, status_code) ) url = response.url scheme, netloc, path, query, fragment = urlsplit(url) # Prepend the request path to handle relative path redirects. if not path.startswith('/'): url = urljoin(response.request['PATH_INFO'], url) path = urljoin(response.request['PATH_INFO'], path) if fetch_redirect_response: # netloc might be empty, or in cases where Django tests the # HTTP scheme, the convention is for netloc to be 'testserver'. # Trust both as "internal" URLs here. domain, port = split_domain_port(netloc) if domain and not validate_host(domain, settings.ALLOWED_HOSTS): raise ValueError( "The test client is unable to fetch remote URLs (got %s). " "If the host is served by Django, add '%s' to ALLOWED_HOSTS. " "Otherwise, use assertRedirects(..., fetch_redirect_response=False)." % (url, domain) ) # Get the redirection page, using the same client that was used # to obtain the original response. extra = response.client.extra or {} redirect_response = response.client.get( path, QueryDict(query), secure=(scheme == 'https'), **extra, ) self.assertEqual( redirect_response.status_code, target_status_code, msg_prefix + "Couldn't retrieve redirection page '%s': response code was %d (expected %d)" % (path, redirect_response.status_code, target_status_code) ) self.assertURLEqual( url, expected_url, msg_prefix + "Response redirected to '%s', expected '%s'" % (url, expected_url) ) def assertURLEqual(self, url1, url2, msg_prefix=''): """ Assert that two URLs are the same, ignoring the order of query string parameters except for parameters with the same name. For example, /path/?x=1&y=2 is equal to /path/?y=2&x=1, but /path/?a=1&a=2 isn't equal to /path/?a=2&a=1. """ def normalize(url): """Sort the URL's query string parameters.""" url = str(url) # Coerce reverse_lazy() URLs. scheme, netloc, path, params, query, fragment = urlparse(url) query_parts = sorted(parse_qsl(query)) return urlunparse((scheme, netloc, path, params, urlencode(query_parts), fragment)) self.assertEqual( normalize(url1), normalize(url2), msg_prefix + "Expected '%s' to equal '%s'." % (url1, url2) ) def _assert_contains(self, response, text, status_code, msg_prefix, html): # If the response supports deferred rendering and hasn't been rendered # yet, then ensure that it does get rendered before proceeding further. 
        if hasattr(response, 'render') and callable(response.render) and not response.is_rendered:
            response.render()

        if msg_prefix:
            msg_prefix += ": "

        self.assertEqual(
            response.status_code, status_code,
            msg_prefix + "Couldn't retrieve content: Response code was %d"
            " (expected %d)" % (response.status_code, status_code)
        )

        if response.streaming:
            content = b''.join(response.streaming_content)
        else:
            content = response.content
        if not isinstance(text, bytes) or html:
            text = str(text)
            content = content.decode(response.charset)
            text_repr = "'%s'" % text
        else:
            text_repr = repr(text)
        if html:
            content = assert_and_parse_html(self, content, None, "Response's content is not valid HTML:")
            text = assert_and_parse_html(self, text, None, "Second argument is not valid HTML:")
        real_count = content.count(text)
        return (text_repr, real_count, msg_prefix)

    def assertContains(self, response, text, count=None, status_code=200, msg_prefix='', html=False):
        """
        Assert that a response indicates that some content was retrieved
        successfully (i.e., the HTTP status code was as expected) and that
        ``text`` occurs ``count`` times in the content of the response.
        If ``count`` is None, the count doesn't matter - the assertion is
        true if the text occurs at least once in the response.
        """
        text_repr, real_count, msg_prefix = self._assert_contains(
            response, text, status_code, msg_prefix, html)

        if count is not None:
            self.assertEqual(
                real_count, count,
                msg_prefix + "Found %d instances of %s in response (expected %d)" %
                (real_count, text_repr, count)
            )
        else:
            self.assertTrue(real_count != 0, msg_prefix + "Couldn't find %s in response" % text_repr)

    def assertNotContains(self, response, text, status_code=200, msg_prefix='', html=False):
        """
        Assert that a response indicates that some content was retrieved
        successfully (i.e., the HTTP status code was as expected) and that
        ``text`` doesn't occur in the content of the response.
        """
        text_repr, real_count, msg_prefix = self._assert_contains(
            response, text, status_code, msg_prefix, html)

        self.assertEqual(real_count, 0, msg_prefix + "Response should not contain %s" % text_repr)

    def assertFormError(self, response, form, field, errors, msg_prefix=''):
        """
        Assert that a form used to render the response has a specific field
        error.
        """
        if msg_prefix:
            msg_prefix += ": "

        # Put context(s) into a list to simplify processing.
        contexts = to_list(response.context)
        if not contexts:
            self.fail(msg_prefix + "Response did not use any contexts to render the response")

        # Put error(s) into a list to simplify processing.
        errors = to_list(errors)

        # Search all contexts for the error.
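        # Every rendered template contributes a context, and the form may
        # appear in any of them, so all contexts are scanned before failing.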
found_form = False for i, context in enumerate(contexts): if form not in context: continue found_form = True for err in errors: if field: if field in context[form].errors: field_errors = context[form].errors[field] self.assertTrue( err in field_errors, msg_prefix + "The field '%s' on form '%s' in" " context %d does not contain the error '%s'" " (actual errors: %s)" % (field, form, i, err, repr(field_errors)) ) elif field in context[form].fields: self.fail( msg_prefix + "The field '%s' on form '%s' in context %d contains no errors" % (field, form, i) ) else: self.fail( msg_prefix + "The form '%s' in context %d does not contain the field '%s'" % (form, i, field) ) else: non_field_errors = context[form].non_field_errors() self.assertTrue( err in non_field_errors, msg_prefix + "The form '%s' in context %d does not" " contain the non-field error '%s'" " (actual errors: %s)" % (form, i, err, non_field_errors or 'none') ) if not found_form: self.fail(msg_prefix + "The form '%s' was not used to render the response" % form) def assertFormsetError(self, response, formset, form_index, field, errors, msg_prefix=''): """ Assert that a formset used to render the response has a specific error. For field errors, specify the ``form_index`` and the ``field``. For non-field errors, specify the ``form_index`` and the ``field`` as None. For non-form errors, specify ``form_index`` as None and the ``field`` as None. """ # Add punctuation to msg_prefix if msg_prefix: msg_prefix += ": " # Put context(s) into a list to simplify processing. contexts = to_list(response.context) if not contexts: self.fail(msg_prefix + 'Response did not use any contexts to ' 'render the response') # Put error(s) into a list to simplify processing. errors = to_list(errors) # Search all contexts for the error. found_formset = False for i, context in enumerate(contexts): if formset not in context: continue found_formset = True for err in errors: if field is not None: if field in context[formset].forms[form_index].errors: field_errors = context[formset].forms[form_index].errors[field] self.assertTrue( err in field_errors, msg_prefix + "The field '%s' on formset '%s', " "form %d in context %d does not contain the " "error '%s' (actual errors: %s)" % (field, formset, form_index, i, err, repr(field_errors)) ) elif field in context[formset].forms[form_index].fields: self.fail( msg_prefix + "The field '%s' on formset '%s', form %d in context %d contains no errors" % (field, formset, form_index, i) ) else: self.fail( msg_prefix + "The formset '%s', form %d in context %d does not contain the field '%s'" % (formset, form_index, i, field) ) elif form_index is not None: non_field_errors = context[formset].forms[form_index].non_field_errors() self.assertFalse( not non_field_errors, msg_prefix + "The formset '%s', form %d in context %d " "does not contain any non-field errors." % (formset, form_index, i) ) self.assertTrue( err in non_field_errors, msg_prefix + "The formset '%s', form %d in context %d " "does not contain the non-field error '%s' (actual errors: %s)" % (formset, form_index, i, err, repr(non_field_errors)) ) else: non_form_errors = context[formset].non_form_errors() self.assertFalse( not non_form_errors, msg_prefix + "The formset '%s' in context %d does not " "contain any non-form errors." 
% (formset, i) ) self.assertTrue( err in non_form_errors, msg_prefix + "The formset '%s' in context %d does not " "contain the non-form error '%s' (actual errors: %s)" % (formset, i, err, repr(non_form_errors)) ) if not found_formset: self.fail(msg_prefix + "The formset '%s' was not used to render the response" % formset) def _assert_template_used(self, response, template_name, msg_prefix): if response is None and template_name is None: raise TypeError('response and/or template_name argument must be provided') if msg_prefix: msg_prefix += ": " if template_name is not None and response is not None and not hasattr(response, 'templates'): raise ValueError( "assertTemplateUsed() and assertTemplateNotUsed() are only " "usable on responses fetched using the Django test Client." ) if not hasattr(response, 'templates') or (response is None and template_name): if response: template_name = response response = None # use this template with context manager return template_name, None, msg_prefix template_names = [t.name for t in response.templates if t.name is not None] return None, template_names, msg_prefix def assertTemplateUsed(self, response=None, template_name=None, msg_prefix='', count=None): """ Assert that the template with the provided name was used in rendering the response. Also usable as context manager. """ context_mgr_template, template_names, msg_prefix = self._assert_template_used( response, template_name, msg_prefix) if context_mgr_template: # Use assertTemplateUsed as context manager. return _AssertTemplateUsedContext(self, context_mgr_template) if not template_names: self.fail(msg_prefix + "No templates used to render the response") self.assertTrue( template_name in template_names, msg_prefix + "Template '%s' was not a template used to render" " the response. Actual template(s) used: %s" % (template_name, ', '.join(template_names)) ) if count is not None: self.assertEqual( template_names.count(template_name), count, msg_prefix + "Template '%s' was expected to be rendered %d " "time(s) but was actually rendered %d time(s)." % (template_name, count, template_names.count(template_name)) ) def assertTemplateNotUsed(self, response=None, template_name=None, msg_prefix=''): """ Assert that the template with the provided name was NOT used in rendering the response. Also usable as context manager. """ context_mgr_template, template_names, msg_prefix = self._assert_template_used( response, template_name, msg_prefix ) if context_mgr_template: # Use assertTemplateNotUsed as context manager. return _AssertTemplateNotUsedContext(self, context_mgr_template) self.assertFalse( template_name in template_names, msg_prefix + "Template '%s' was used unexpectedly in rendering the response" % template_name ) @contextmanager def _assert_raises_or_warns_cm(self, func, cm_attr, expected_exception, expected_message): with func(expected_exception) as cm: yield cm self.assertIn(expected_message, str(getattr(cm, cm_attr))) def _assertFooMessage(self, func, cm_attr, expected_exception, expected_message, *args, **kwargs): callable_obj = None if args: callable_obj, *args = args cm = self._assert_raises_or_warns_cm(func, cm_attr, expected_exception, expected_message) # Assertion used in context manager fashion. if callable_obj is None: return cm # Assertion was passed a callable. with cm: callable_obj(*args, **kwargs) def assertRaisesMessage(self, expected_exception, expected_message, *args, **kwargs): """ Assert that expected_message is found in the message of a raised exception. 
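        A minimal sketch:

            with self.assertRaisesMessage(ValueError, 'invalid literal'):
                int('xyz')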
Args: expected_exception: Exception class expected to be raised. expected_message: expected error message string value. args: Function to be called and extra positional args. kwargs: Extra kwargs. """ return self._assertFooMessage( self.assertRaises, 'exception', expected_exception, expected_message, *args, **kwargs ) def assertWarnsMessage(self, expected_warning, expected_message, *args, **kwargs): """ Same as assertRaisesMessage but for assertWarns() instead of assertRaises(). """ return self._assertFooMessage( self.assertWarns, 'warning', expected_warning, expected_message, *args, **kwargs ) def assertFieldOutput(self, fieldclass, valid, invalid, field_args=None, field_kwargs=None, empty_value=''): """ Assert that a form field behaves correctly with various inputs. Args: fieldclass: the class of the field to be tested. valid: a dictionary mapping valid inputs to their expected cleaned values. invalid: a dictionary mapping invalid inputs to one or more raised error messages. field_args: the args passed to instantiate the field field_kwargs: the kwargs passed to instantiate the field empty_value: the expected clean output for inputs in empty_values """ if field_args is None: field_args = [] if field_kwargs is None: field_kwargs = {} required = fieldclass(*field_args, **field_kwargs) optional = fieldclass(*field_args, **{**field_kwargs, 'required': False}) # test valid inputs for input, output in valid.items(): self.assertEqual(required.clean(input), output) self.assertEqual(optional.clean(input), output) # test invalid inputs for input, errors in invalid.items(): with self.assertRaises(ValidationError) as context_manager: required.clean(input) self.assertEqual(context_manager.exception.messages, errors) with self.assertRaises(ValidationError) as context_manager: optional.clean(input) self.assertEqual(context_manager.exception.messages, errors) # test required inputs error_required = [required.error_messages['required']] for e in required.empty_values: with self.assertRaises(ValidationError) as context_manager: required.clean(e) self.assertEqual(context_manager.exception.messages, error_required) self.assertEqual(optional.clean(e), empty_value) # test that max_length and min_length are always accepted if issubclass(fieldclass, CharField): field_kwargs.update({'min_length': 2, 'max_length': 20}) self.assertIsInstance(fieldclass(*field_args, **field_kwargs), fieldclass) def assertHTMLEqual(self, html1, html2, msg=None): """ Assert that two HTML snippets are semantically the same. Whitespace in most cases is ignored, and attribute ordering is not significant. The arguments must be valid HTML. 
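        A minimal sketch: these compare as equal despite attribute order and
        quoting differences:

            self.assertHTMLEqual(
                '<p id="x" class="y">hi</p>',
                "<p class='y' id='x'>hi</p>",
            )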
""" dom1 = assert_and_parse_html(self, html1, msg, 'First argument is not valid HTML:') dom2 = assert_and_parse_html(self, html2, msg, 'Second argument is not valid HTML:') if dom1 != dom2: standardMsg = '%s != %s' % ( safe_repr(dom1, True), safe_repr(dom2, True)) diff = ('\n' + '\n'.join(difflib.ndiff( str(dom1).splitlines(), str(dom2).splitlines(), ))) standardMsg = self._truncateMessage(standardMsg, diff) self.fail(self._formatMessage(msg, standardMsg)) def assertHTMLNotEqual(self, html1, html2, msg=None): """Assert that two HTML snippets are not semantically equivalent.""" dom1 = assert_and_parse_html(self, html1, msg, 'First argument is not valid HTML:') dom2 = assert_and_parse_html(self, html2, msg, 'Second argument is not valid HTML:') if dom1 == dom2: standardMsg = '%s == %s' % ( safe_repr(dom1, True), safe_repr(dom2, True)) self.fail(self._formatMessage(msg, standardMsg)) def assertInHTML(self, needle, haystack, count=None, msg_prefix=''): needle = assert_and_parse_html(self, needle, None, 'First argument is not valid HTML:') haystack = assert_and_parse_html(self, haystack, None, 'Second argument is not valid HTML:') real_count = haystack.count(needle) if count is not None: self.assertEqual( real_count, count, msg_prefix + "Found %d instances of '%s' in response (expected %d)" % (real_count, needle, count) ) else: self.assertTrue(real_count != 0, msg_prefix + "Couldn't find '%s' in response" % needle) def assertJSONEqual(self, raw, expected_data, msg=None): """ Assert that the JSON fragments raw and expected_data are equal. Usual JSON non-significant whitespace rules apply as the heavyweight is delegated to the json library. """ try: data = json.loads(raw) except json.JSONDecodeError: self.fail("First argument is not valid JSON: %r" % raw) if isinstance(expected_data, str): try: expected_data = json.loads(expected_data) except ValueError: self.fail("Second argument is not valid JSON: %r" % expected_data) self.assertEqual(data, expected_data, msg=msg) def assertJSONNotEqual(self, raw, expected_data, msg=None): """ Assert that the JSON fragments raw and expected_data are not equal. Usual JSON non-significant whitespace rules apply as the heavyweight is delegated to the json library. """ try: data = json.loads(raw) except json.JSONDecodeError: self.fail("First argument is not valid JSON: %r" % raw) if isinstance(expected_data, str): try: expected_data = json.loads(expected_data) except json.JSONDecodeError: self.fail("Second argument is not valid JSON: %r" % expected_data) self.assertNotEqual(data, expected_data, msg=msg) def assertXMLEqual(self, xml1, xml2, msg=None): """ Assert that two XML snippets are semantically the same. Whitespace in most cases is ignored and attribute ordering is not significant. The arguments must be valid XML. """ try: result = compare_xml(xml1, xml2) except Exception as e: standardMsg = 'First or second argument is not valid XML\n%s' % e self.fail(self._formatMessage(msg, standardMsg)) else: if not result: standardMsg = '%s != %s' % (safe_repr(xml1, True), safe_repr(xml2, True)) diff = ('\n' + '\n'.join( difflib.ndiff(xml1.splitlines(), xml2.splitlines()) )) standardMsg = self._truncateMessage(standardMsg, diff) self.fail(self._formatMessage(msg, standardMsg)) def assertXMLNotEqual(self, xml1, xml2, msg=None): """ Assert that two XML snippets are not semantically equivalent. Whitespace in most cases is ignored and attribute ordering is not significant. The arguments must be valid XML. 
""" try: result = compare_xml(xml1, xml2) except Exception as e: standardMsg = 'First or second argument is not valid XML\n%s' % e self.fail(self._formatMessage(msg, standardMsg)) else: if result: standardMsg = '%s == %s' % (safe_repr(xml1, True), safe_repr(xml2, True)) self.fail(self._formatMessage(msg, standardMsg)) class TransactionTestCase(SimpleTestCase): # Subclasses can ask for resetting of auto increment sequence before each # test case reset_sequences = False # Subclasses can enable only a subset of apps for faster tests available_apps = None # Subclasses can define fixtures which will be automatically installed. fixtures = None databases = {DEFAULT_DB_ALIAS} _disallowed_database_msg = ( 'Database %(operation)s to %(alias)r are not allowed in this test. ' 'Add %(alias)r to %(test)s.databases to ensure proper test isolation ' 'and silence this failure.' ) # If transactions aren't available, Django will serialize the database # contents into a fixture during setup and flush and reload them # during teardown (as flush does not restore data from migrations). # This can be slow; this flag allows enabling on a per-case basis. serialized_rollback = False def _pre_setup(self): """ Perform pre-test setup: * If the class has an 'available_apps' attribute, restrict the app registry to these applications, then fire the post_migrate signal -- it must run with the correct set of applications for the test case. * If the class has a 'fixtures' attribute, install those fixtures. """ super()._pre_setup() if self.available_apps is not None: apps.set_available_apps(self.available_apps) setting_changed.send( sender=settings._wrapped.__class__, setting='INSTALLED_APPS', value=self.available_apps, enter=True, ) for db_name in self._databases_names(include_mirrors=False): emit_post_migrate_signal(verbosity=0, interactive=False, db=db_name) try: self._fixture_setup() except Exception: if self.available_apps is not None: apps.unset_available_apps() setting_changed.send( sender=settings._wrapped.__class__, setting='INSTALLED_APPS', value=settings.INSTALLED_APPS, enter=False, ) raise # Clear the queries_log so that it's less likely to overflow (a single # test probably won't execute 9K queries). If queries_log overflows, # then assertNumQueries() doesn't work. for db_name in self._databases_names(include_mirrors=False): connections[db_name].queries_log.clear() @classmethod def _databases_names(cls, include_mirrors=True): # Only consider allowed database aliases, including mirrors or not. return [ alias for alias in connections if alias in cls.databases and ( include_mirrors or not connections[alias].settings_dict['TEST']['MIRROR'] ) ] def _reset_sequences(self, db_name): conn = connections[db_name] if conn.features.supports_sequence_reset: sql_list = conn.ops.sequence_reset_by_name_sql( no_style(), conn.introspection.sequence_list()) if sql_list: with transaction.atomic(using=db_name): with conn.cursor() as cursor: for sql in sql_list: cursor.execute(sql) def _fixture_setup(self): for db_name in self._databases_names(include_mirrors=False): # Reset sequences if self.reset_sequences: self._reset_sequences(db_name) # Provide replica initial data from migrated apps, if needed. 
            if self.serialized_rollback and hasattr(connections[db_name], "_test_serialized_contents"):
                if self.available_apps is not None:
                    apps.unset_available_apps()
                connections[db_name].creation.deserialize_db_from_string(
                    connections[db_name]._test_serialized_contents
                )
                if self.available_apps is not None:
                    apps.set_available_apps(self.available_apps)

            if self.fixtures:
                # We have to use this slightly awkward syntax due to the fact
                # that we're using *args and **kwargs together.
                call_command('loaddata', *self.fixtures,
                             **{'verbosity': 0, 'database': db_name})

    def _should_reload_connections(self):
        return True

    def _post_teardown(self):
        """
        Perform post-test things:
        * Flush the contents of the database to leave a clean slate. If the
          class has an 'available_apps' attribute, don't fire post_migrate.
        * Force-close the connection so the next test gets a clean cursor.
        """
        try:
            self._fixture_teardown()
            super()._post_teardown()
            if self._should_reload_connections():
                # Some DB cursors include SQL statements as part of cursor
                # creation. If you have a test that does a rollback, the effect
                # of these statements is lost, which can affect the operation
                # of tests (e.g., losing a timezone setting causing objects to
                # be created with the wrong time). To make sure this doesn't
                # happen, get a clean connection at the start of every test.
                for conn in connections.all():
                    conn.close()
        finally:
            if self.available_apps is not None:
                apps.unset_available_apps()
                setting_changed.send(sender=settings._wrapped.__class__,
                                     setting='INSTALLED_APPS',
                                     value=settings.INSTALLED_APPS,
                                     enter=False)

    def _fixture_teardown(self):
        # Allow TRUNCATE ... CASCADE and don't emit the post_migrate signal
        # when flushing only a subset of the apps.
        for db_name in self._databases_names(include_mirrors=False):
            # Flush the database.
            inhibit_post_migrate = (
                self.available_apps is not None or
                (   # Inhibit the post_migrate signal when using serialized
                    # rollback to avoid trying to recreate the serialized data.
                    self.serialized_rollback and
                    hasattr(connections[db_name], '_test_serialized_contents')
                )
            )
            call_command('flush', verbosity=0, interactive=False,
                         database=db_name, reset_sequences=False,
                         allow_cascade=self.available_apps is not None,
                         inhibit_post_migrate=inhibit_post_migrate)

    def assertQuerysetEqual(self, qs, values, transform=repr, ordered=True, msg=None):
        items = map(transform, qs)
        if not ordered:
            return self.assertEqual(Counter(items), Counter(values), msg=msg)
        values = list(values)
        # For example, qs.iterator() could be passed as qs, but it doesn't
        # have an 'ordered' attribute.
        if len(values) > 1 and hasattr(qs, 'ordered') and not qs.ordered:
            raise ValueError("Trying to compare a non-ordered queryset "
                             "against more than one ordered value")
        return self.assertEqual(list(items), values, msg=msg)

    def assertNumQueries(self, num, func=None, *args, using=DEFAULT_DB_ALIAS, **kwargs):
        conn = connections[using]

        context = _AssertNumQueriesContext(self, num, conn)
        if func is None:
            return context

        with context:
            func(*args, **kwargs)


def connections_support_transactions(aliases=None):
    """
    Return whether or not all (or specified) connections support
    transactions.
    """
    conns = connections.all() if aliases is None else (connections[alias] for alias in aliases)
    return all(conn.features.supports_transactions for conn in conns)


class TestCase(TransactionTestCase):
    """
    Similar to TransactionTestCase, but use `transaction.atomic()` to achieve
    test isolation.

    In most situations, TestCase should be preferred to TransactionTestCase as
    it allows faster execution.
However, there are some situations where using TransactionTestCase might be necessary (e.g. testing some transactional behavior). On database backends with no transaction support, TestCase behaves as TransactionTestCase. """ @classmethod def _enter_atomics(cls): """Open atomic blocks for multiple databases.""" atomics = {} for db_name in cls._databases_names(): atomics[db_name] = transaction.atomic(using=db_name) atomics[db_name].__enter__() return atomics @classmethod def _rollback_atomics(cls, atomics): """Rollback atomic blocks opened by the previous method.""" for db_name in reversed(cls._databases_names()): transaction.set_rollback(True, using=db_name) atomics[db_name].__exit__(None, None, None) @classmethod def _databases_support_transactions(cls): return connections_support_transactions(cls.databases) @classmethod def setUpClass(cls): super().setUpClass() if not cls._databases_support_transactions(): return cls.cls_atomics = cls._enter_atomics() if cls.fixtures: for db_name in cls._databases_names(include_mirrors=False): try: call_command('loaddata', *cls.fixtures, **{'verbosity': 0, 'database': db_name}) except Exception: cls._rollback_atomics(cls.cls_atomics) cls._remove_databases_failures() raise try: cls.setUpTestData() except Exception: cls._rollback_atomics(cls.cls_atomics) cls._remove_databases_failures() raise @classmethod def tearDownClass(cls): if cls._databases_support_transactions(): cls._rollback_atomics(cls.cls_atomics) for conn in connections.all(): conn.close() super().tearDownClass() @classmethod def setUpTestData(cls): """Load initial data for the TestCase.""" pass def _should_reload_connections(self): if self._databases_support_transactions(): return False return super()._should_reload_connections() def _fixture_setup(self): if not self._databases_support_transactions(): # If the backend does not support transactions, we should reload # class data before each test self.setUpTestData() return super()._fixture_setup() assert not self.reset_sequences, 'reset_sequences cannot be used on TestCase instances' self.atomics = self._enter_atomics() def _fixture_teardown(self): if not self._databases_support_transactions(): return super()._fixture_teardown() try: for db_name in reversed(self._databases_names()): if self._should_check_constraints(connections[db_name]): connections[db_name].check_constraints() finally: self._rollback_atomics(self.atomics) def _should_check_constraints(self, connection): return ( connection.features.can_defer_constraint_checks and not connection.needs_rollback and connection.is_usable() ) class CheckCondition: """Descriptor class for deferred condition checking.""" def __init__(self, *conditions): self.conditions = conditions def add_condition(self, condition, reason): return self.__class__(*self.conditions, (condition, reason)) def __get__(self, instance, cls=None): # Trigger access for all bases. if any(getattr(base, '__unittest_skip__', False) for base in cls.__bases__): return True for condition, reason in self.conditions: if condition(): # Override this descriptor's value and set the skip reason. 
cls.__unittest_skip__ = True cls.__unittest_skip_why__ = reason return True return False def _deferredSkip(condition, reason, name): def decorator(test_func): nonlocal condition if not (isinstance(test_func, type) and issubclass(test_func, unittest.TestCase)): @wraps(test_func) def skip_wrapper(*args, **kwargs): if (args and isinstance(args[0], unittest.TestCase) and connection.alias not in getattr(args[0], 'databases', {})): raise ValueError( "%s cannot be used on %s as %s doesn't allow queries " "against the %r database." % ( name, args[0], args[0].__class__.__qualname__, connection.alias, ) ) if condition(): raise unittest.SkipTest(reason) return test_func(*args, **kwargs) test_item = skip_wrapper else: # Assume a class is decorated test_item = test_func databases = getattr(test_item, 'databases', None) if not databases or connection.alias not in databases: # Defer raising to allow importing test class's module. def condition(): raise ValueError( "%s cannot be used on %s as it doesn't allow queries " "against the '%s' database." % ( name, test_item, connection.alias, ) ) # Retrieve the possibly existing value from the class's dict to # avoid triggering the descriptor. skip = test_func.__dict__.get('__unittest_skip__') if isinstance(skip, CheckCondition): test_item.__unittest_skip__ = skip.add_condition(condition, reason) elif skip is not True: test_item.__unittest_skip__ = CheckCondition((condition, reason)) return test_item return decorator def skipIfDBFeature(*features): """Skip a test if a database has at least one of the named features.""" return _deferredSkip( lambda: any(getattr(connection.features, feature, False) for feature in features), "Database has feature(s) %s" % ", ".join(features), 'skipIfDBFeature', ) def skipUnlessDBFeature(*features): """Skip a test unless a database has all the named features.""" return _deferredSkip( lambda: not all(getattr(connection.features, feature, False) for feature in features), "Database doesn't support feature(s): %s" % ", ".join(features), 'skipUnlessDBFeature', ) def skipUnlessAnyDBFeature(*features): """Skip a test unless a database has any of the named features.""" return _deferredSkip( lambda: not any(getattr(connection.features, feature, False) for feature in features), "Database doesn't support any of the feature(s): %s" % ", ".join(features), 'skipUnlessAnyDBFeature', ) class QuietWSGIRequestHandler(WSGIRequestHandler): """ A WSGIRequestHandler that doesn't log to standard output any of the requests received, so as to not clutter the test result output. """ def log_message(*args): pass class FSFilesHandler(WSGIHandler): """ WSGI middleware that intercepts calls to a directory, as defined by one of the *_ROOT settings, and serves those files, publishing them under *_URL. """ def __init__(self, application): self.application = application self.base_url = urlparse(self.get_base_url()) super().__init__() def _should_handle(self, path): """ Check if the path should be handled. 
Ignore the path if: * the host is provided as part of the base_url * the request's path isn't under the media path (or equal) """ return path.startswith(self.base_url[2]) and not self.base_url[1] def file_path(self, url): """Return the relative path to the file on disk for the given URL.""" relative_url = url[len(self.base_url[2]):] return url2pathname(relative_url) def get_response(self, request): from django.http import Http404 if self._should_handle(request.path): try: return self.serve(request) except Http404: pass return super().get_response(request) def serve(self, request): os_rel_path = self.file_path(request.path) os_rel_path = posixpath.normpath(unquote(os_rel_path)) # Emulate behavior of django.contrib.staticfiles.views.serve() when it # invokes staticfiles' finders functionality. # TODO: Modify if/when that internal API is refactored final_rel_path = os_rel_path.replace('\\', '/').lstrip('/') return serve(request, final_rel_path, document_root=self.get_base_dir()) def __call__(self, environ, start_response): if not self._should_handle(get_path_info(environ)): return self.application(environ, start_response) return super().__call__(environ, start_response) class _StaticFilesHandler(FSFilesHandler): """ Handler for serving static files. A private class that is meant to be used solely as a convenience by LiveServerThread. """ def get_base_dir(self): return settings.STATIC_ROOT def get_base_url(self): return settings.STATIC_URL class _MediaFilesHandler(FSFilesHandler): """ Handler for serving the media files. A private class that is meant to be used solely as a convenience by LiveServerThread. """ def get_base_dir(self): return settings.MEDIA_ROOT def get_base_url(self): return settings.MEDIA_URL class LiveServerThread(threading.Thread): """Thread for running a live http server while the tests are running.""" def __init__(self, host, static_handler, connections_override=None, port=0): self.host = host self.port = port self.is_ready = threading.Event() self.error = None self.static_handler = static_handler self.connections_override = connections_override super().__init__() def run(self): """ Set up the live server and databases, and then loop over handling HTTP requests. """ if self.connections_override: # Override this thread's database connections with the ones # provided by the main thread. for alias, conn in self.connections_override.items(): connections[alias] = conn try: # Create the handler for serving static and media files handler = self.static_handler(_MediaFilesHandler(WSGIHandler())) self.httpd = self._create_server() # If binding to port zero, assign the port allocated by the OS. if self.port == 0: self.port = self.httpd.server_address[1] self.httpd.set_app(handler) self.is_ready.set() self.httpd.serve_forever() except Exception as e: self.error = e self.is_ready.set() finally: connections.close_all() def _create_server(self): return ThreadedWSGIServer((self.host, self.port), QuietWSGIRequestHandler, allow_reuse_address=False) def terminate(self): if hasattr(self, 'httpd'): # Stop the WSGI server self.httpd.shutdown() self.httpd.server_close() self.join() class LiveServerTestCase(TransactionTestCase): """ Do basically the same as TransactionTestCase but also launch a live HTTP server in a separate thread so that the tests may use another testing framework, such as Selenium for example, instead of the built-in dummy client. 
    It inherits from TransactionTestCase instead of TestCase because the
    threads don't share the same transactions (unless using in-memory sqlite)
    and each thread needs to commit all its transactions so that the other
    thread can see the changes.
    """
    host = 'localhost'
    port = 0
    server_thread_class = LiveServerThread
    static_handler = _StaticFilesHandler

    @classproperty
    def live_server_url(cls):
        return 'http://%s:%s' % (cls.host, cls.server_thread.port)

    @classproperty
    def allowed_host(cls):
        return cls.host

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        connections_override = {}
        for conn in connections.all():
            # If using in-memory sqlite databases, pass the connections to
            # the server thread.
            if conn.vendor == 'sqlite' and conn.is_in_memory_db():
                # Explicitly enable thread-shareability for this connection.
                conn.inc_thread_sharing()
                connections_override[conn.alias] = conn

        cls._live_server_modified_settings = modify_settings(
            ALLOWED_HOSTS={'append': cls.allowed_host},
        )
        cls._live_server_modified_settings.enable()
        cls.server_thread = cls._create_server_thread(connections_override)
        cls.server_thread.daemon = True
        cls.server_thread.start()

        # Wait for the live server to be ready.
        cls.server_thread.is_ready.wait()
        if cls.server_thread.error:
            # Clean up behind ourselves, since tearDownClass won't get called
            # in case of errors.
            cls._tearDownClassInternal()
            raise cls.server_thread.error

    @classmethod
    def _create_server_thread(cls, connections_override):
        return cls.server_thread_class(
            cls.host,
            cls.static_handler,
            connections_override=connections_override,
            port=cls.port,
        )

    @classmethod
    def _tearDownClassInternal(cls):
        # There may not be a 'server_thread' attribute if setUpClass() has
        # raised an exception for some reason.
        if hasattr(cls, 'server_thread'):
            # Terminate the live server's thread.
            cls.server_thread.terminate()
            # Restore sqlite in-memory database connections' non-shareability.
            for conn in cls.server_thread.connections_override.values():
                conn.dec_thread_sharing()

    @classmethod
    def tearDownClass(cls):
        cls._tearDownClassInternal()
        cls._live_server_modified_settings.disable()
        super().tearDownClass()


class SerializeMixin:
    """
    Enforce serialization of TestCases that share a common resource.

    Define a common 'lockfile' for each set of TestCases to serialize. This
    file must exist on the filesystem.

    Place it early in the MRO in order to isolate setUpClass()/tearDownClass().
    """
    lockfile = None

    @classmethod
    def setUpClass(cls):
        if cls.lockfile is None:
            raise ValueError(
                "{}.lockfile isn't set. Set it to a unique value "
                "in the base class.".format(cls.__name__))
        cls._lockfile = open(cls.lockfile)
        locks.lock(cls._lockfile, locks.LOCK_EX)
        super().setUpClass()

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        cls._lockfile.close()
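
# ---------------------------------------------------------------------------
# Usage sketch (illustrative only, not part of Django's API): how the
# SerializeMixin and LiveServerTestCase classes above are typically combined
# in user test code. The lockfile value and the requested URL path are
# assumptions made for this example.
class ExampleSerializedLiveServerTests(SerializeMixin, LiveServerTestCase):
    # Any file that exists on disk works as the lock; using the test module
    # itself is a common convention.
    lockfile = __file__

    def test_index_is_served(self):
        from urllib.request import urlopen
        # live_server_url points at the HTTP server started in setUpClass().
        with urlopen(self.live_server_url + '/') as response:
            self.assertEqual(response.status, 200)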
da06ae05ed52b56b2d8fc38de7e99f2a4f3e5a2da85f9bb2403abea45cb561e0
import asyncio import logging import re import sys import time import warnings from contextlib import contextmanager from functools import wraps from io import StringIO from itertools import chain from types import SimpleNamespace from unittest import TestCase, skipIf, skipUnless from xml.dom.minidom import Node, parseString from django.apps import apps from django.apps.registry import Apps from django.conf import UserSettingsHolder, settings from django.core import mail from django.core.exceptions import ImproperlyConfigured from django.core.signals import request_started from django.db import DEFAULT_DB_ALIAS, connections, reset_queries from django.db.models.options import Options from django.template import Template from django.test.signals import setting_changed, template_rendered from django.urls import get_script_prefix, set_script_prefix from django.utils.translation import deactivate try: import jinja2 except ImportError: jinja2 = None __all__ = ( 'Approximate', 'ContextList', 'isolate_lru_cache', 'get_runner', 'modify_settings', 'override_settings', 'requires_tz_support', 'setup_test_environment', 'teardown_test_environment', ) TZ_SUPPORT = hasattr(time, 'tzset') class Approximate: def __init__(self, val, places=7): self.val = val self.places = places def __repr__(self): return repr(self.val) def __eq__(self, other): return self.val == other or round(abs(self.val - other), self.places) == 0 class ContextList(list): """ A wrapper that provides direct key access to context items contained in a list of context objects. """ def __getitem__(self, key): if isinstance(key, str): for subcontext in self: if key in subcontext: return subcontext[key] raise KeyError(key) else: return super().__getitem__(key) def get(self, key, default=None): try: return self.__getitem__(key) except KeyError: return default def __contains__(self, key): try: self[key] except KeyError: return False return True def keys(self): """ Flattened keys of subcontexts. """ return set(chain.from_iterable(d for subcontext in self for d in subcontext)) def instrumented_test_render(self, context): """ An instrumented Template render method, providing a signal that can be intercepted by the test Client. """ template_rendered.send(sender=self, template=self, context=context) return self.nodelist.render(context) class _TestState: pass def setup_test_environment(debug=None): """ Perform global pre-test setup, such as installing the instrumented template renderer and setting the email backend to the locmem email backend. """ if hasattr(_TestState, 'saved_data'): # Executing this function twice would overwrite the saved values. raise RuntimeError( "setup_test_environment() was already called and can't be called " "again without first calling teardown_test_environment()." ) if debug is None: debug = settings.DEBUG saved_data = SimpleNamespace() _TestState.saved_data = saved_data saved_data.allowed_hosts = settings.ALLOWED_HOSTS # Add the default host of the test client. settings.ALLOWED_HOSTS = [*settings.ALLOWED_HOSTS, 'testserver'] saved_data.debug = settings.DEBUG settings.DEBUG = debug saved_data.email_backend = settings.EMAIL_BACKEND settings.EMAIL_BACKEND = 'django.core.mail.backends.locmem.EmailBackend' saved_data.template_render = Template._render Template._render = instrumented_test_render mail.outbox = [] deactivate() def teardown_test_environment(): """ Perform any global post-test teardown, such as restoring the original template renderer and restoring the email sending functions. 
""" saved_data = _TestState.saved_data settings.ALLOWED_HOSTS = saved_data.allowed_hosts settings.DEBUG = saved_data.debug settings.EMAIL_BACKEND = saved_data.email_backend Template._render = saved_data.template_render del _TestState.saved_data del mail.outbox def setup_databases(verbosity, interactive, keepdb=False, debug_sql=False, parallel=0, aliases=None, **kwargs): """Create the test databases.""" test_databases, mirrored_aliases = get_unique_databases_and_mirrors(aliases) old_names = [] for db_name, aliases in test_databases.values(): first_alias = None for alias in aliases: connection = connections[alias] old_names.append((connection, db_name, first_alias is None)) # Actually create the database for the first connection if first_alias is None: first_alias = alias connection.creation.create_test_db( verbosity=verbosity, autoclobber=not interactive, keepdb=keepdb, serialize=connection.settings_dict['TEST'].get('SERIALIZE', True), ) if parallel > 1: for index in range(parallel): connection.creation.clone_test_db( suffix=str(index + 1), verbosity=verbosity, keepdb=keepdb, ) # Configure all other connections as mirrors of the first one else: connections[alias].creation.set_as_test_mirror(connections[first_alias].settings_dict) # Configure the test mirrors. for alias, mirror_alias in mirrored_aliases.items(): connections[alias].creation.set_as_test_mirror( connections[mirror_alias].settings_dict) if debug_sql: for alias in connections: connections[alias].force_debug_cursor = True return old_names def dependency_ordered(test_databases, dependencies): """ Reorder test_databases into an order that honors the dependencies described in TEST[DEPENDENCIES]. """ ordered_test_databases = [] resolved_databases = set() # Maps db signature to dependencies of all its aliases dependencies_map = {} # Check that no database depends on its own alias for sig, (_, aliases) in test_databases: all_deps = set() for alias in aliases: all_deps.update(dependencies.get(alias, [])) if not all_deps.isdisjoint(aliases): raise ImproperlyConfigured( "Circular dependency: databases %r depend on each other, " "but are aliases." % aliases ) dependencies_map[sig] = all_deps while test_databases: changed = False deferred = [] # Try to find a DB that has all its dependencies met for signature, (db_name, aliases) in test_databases: if dependencies_map[signature].issubset(resolved_databases): resolved_databases.update(aliases) ordered_test_databases.append((signature, (db_name, aliases))) changed = True else: deferred.append((signature, (db_name, aliases))) if not changed: raise ImproperlyConfigured("Circular dependency in TEST[DEPENDENCIES]") test_databases = deferred return ordered_test_databases def get_unique_databases_and_mirrors(aliases=None): """ Figure out which databases actually need to be created. Deduplicate entries in DATABASES that correspond the same database or are configured as test mirrors. Return two values: - test_databases: ordered mapping of signatures to (name, list of aliases) where all aliases share the same underlying database. - mirrored_aliases: mapping of mirror aliases to original aliases. """ if aliases is None: aliases = connections mirrored_aliases = {} test_databases = {} dependencies = {} default_sig = connections[DEFAULT_DB_ALIAS].creation.test_db_signature() for alias in connections: connection = connections[alias] test_settings = connection.settings_dict['TEST'] if test_settings['MIRROR']: # If the database is marked as a test mirror, save the alias. 
mirrored_aliases[alias] = test_settings['MIRROR'] elif alias in aliases: # Store a tuple with DB parameters that uniquely identify it. # If we have two aliases with the same values for that tuple, # we only need to create the test database once. item = test_databases.setdefault( connection.creation.test_db_signature(), (connection.settings_dict['NAME'], set()) ) item[1].add(alias) if 'DEPENDENCIES' in test_settings: dependencies[alias] = test_settings['DEPENDENCIES'] else: if alias != DEFAULT_DB_ALIAS and connection.creation.test_db_signature() != default_sig: dependencies[alias] = test_settings.get('DEPENDENCIES', [DEFAULT_DB_ALIAS]) test_databases = dict(dependency_ordered(test_databases.items(), dependencies)) return test_databases, mirrored_aliases def teardown_databases(old_config, verbosity, parallel=0, keepdb=False): """Destroy all the non-mirror databases.""" for connection, old_name, destroy in old_config: if destroy: if parallel > 1: for index in range(parallel): connection.creation.destroy_test_db( suffix=str(index + 1), verbosity=verbosity, keepdb=keepdb, ) connection.creation.destroy_test_db(old_name, verbosity, keepdb) def get_runner(settings, test_runner_class=None): test_runner_class = test_runner_class or settings.TEST_RUNNER test_path = test_runner_class.split('.') # Allow for relative paths if len(test_path) > 1: test_module_name = '.'.join(test_path[:-1]) else: test_module_name = '.' test_module = __import__(test_module_name, {}, {}, test_path[-1]) return getattr(test_module, test_path[-1]) class TestContextDecorator: """ A base class that can either be used as a context manager during tests or as a test function or unittest.TestCase subclass decorator to perform temporary alterations. `attr_name`: attribute assigned the return value of enable() if used as a class decorator. `kwarg_name`: keyword argument passing the return value of enable() if used as a function decorator. """ def __init__(self, attr_name=None, kwarg_name=None): self.attr_name = attr_name self.kwarg_name = kwarg_name def enable(self): raise NotImplementedError def disable(self): raise NotImplementedError def __enter__(self): return self.enable() def __exit__(self, exc_type, exc_value, traceback): self.disable() def decorate_class(self, cls): if issubclass(cls, TestCase): decorated_setUp = cls.setUp decorated_tearDown = cls.tearDown def setUp(inner_self): context = self.enable() if self.attr_name: setattr(inner_self, self.attr_name, context) try: decorated_setUp(inner_self) except Exception: self.disable() raise def tearDown(inner_self): decorated_tearDown(inner_self) self.disable() cls.setUp = setUp cls.tearDown = tearDown return cls raise TypeError('Can only decorate subclasses of unittest.TestCase') def decorate_callable(self, func): if asyncio.iscoroutinefunction(func): # If the inner function is an async function, we must execute async # as well so that the `with` statement executes at the right time. 
@wraps(func) async def inner(*args, **kwargs): with self as context: if self.kwarg_name: kwargs[self.kwarg_name] = context return await func(*args, **kwargs) else: @wraps(func) def inner(*args, **kwargs): with self as context: if self.kwarg_name: kwargs[self.kwarg_name] = context return func(*args, **kwargs) return inner def __call__(self, decorated): if isinstance(decorated, type): return self.decorate_class(decorated) elif callable(decorated): return self.decorate_callable(decorated) raise TypeError('Cannot decorate object of type %s' % type(decorated)) class override_settings(TestContextDecorator): """ Act as either a decorator or a context manager. If it's a decorator, take a function and return a wrapped function. If it's a contextmanager, use it with the ``with`` statement. In either event, entering/exiting are called before and after, respectively, the function/block is executed. """ enable_exception = None def __init__(self, **kwargs): self.options = kwargs super().__init__() def enable(self): # Keep this code at the beginning to leave the settings unchanged # in case it raises an exception because INSTALLED_APPS is invalid. if 'INSTALLED_APPS' in self.options: try: apps.set_installed_apps(self.options['INSTALLED_APPS']) except Exception: apps.unset_installed_apps() raise override = UserSettingsHolder(settings._wrapped) for key, new_value in self.options.items(): setattr(override, key, new_value) self.wrapped = settings._wrapped settings._wrapped = override for key, new_value in self.options.items(): try: setting_changed.send( sender=settings._wrapped.__class__, setting=key, value=new_value, enter=True, ) except Exception as exc: self.enable_exception = exc self.disable() def disable(self): if 'INSTALLED_APPS' in self.options: apps.unset_installed_apps() settings._wrapped = self.wrapped del self.wrapped responses = [] for key in self.options: new_value = getattr(settings, key, None) responses_for_setting = setting_changed.send_robust( sender=settings._wrapped.__class__, setting=key, value=new_value, enter=False, ) responses.extend(responses_for_setting) if self.enable_exception is not None: exc = self.enable_exception self.enable_exception = None raise exc for _, response in responses: if isinstance(response, Exception): raise response def save_options(self, test_func): if test_func._overridden_settings is None: test_func._overridden_settings = self.options else: # Duplicate dict to prevent subclasses from altering their parent. test_func._overridden_settings = { **test_func._overridden_settings, **self.options, } def decorate_class(self, cls): from django.test import SimpleTestCase if not issubclass(cls, SimpleTestCase): raise ValueError( "Only subclasses of Django SimpleTestCase can be decorated " "with override_settings") self.save_options(cls) return cls class modify_settings(override_settings): """ Like override_settings, but makes it possible to append, prepend, or remove items instead of redefining the entire list. """ def __init__(self, *args, **kwargs): if args: # Hack used when instantiating from SimpleTestCase.setUpClass. assert not kwargs self.operations = args[0] else: assert not args self.operations = list(kwargs.items()) super(override_settings, self).__init__() def save_options(self, test_func): if test_func._modified_settings is None: test_func._modified_settings = self.operations else: # Duplicate list to prevent subclasses from altering their parent. 
            test_func._modified_settings = list(
                test_func._modified_settings) + self.operations

    def enable(self):
        self.options = {}
        for name, operations in self.operations:
            try:
                # When called from SimpleTestCase.setUpClass, values may be
                # overridden several times; cumulate changes.
                value = self.options[name]
            except KeyError:
                value = list(getattr(settings, name, []))
            for action, items in operations.items():
                # items may be a single value or an iterable.
                if isinstance(items, str):
                    items = [items]
                if action == 'append':
                    value = value + [item for item in items if item not in value]
                elif action == 'prepend':
                    value = [item for item in items if item not in value] + value
                elif action == 'remove':
                    value = [item for item in value if item not in items]
                else:
                    raise ValueError("Unsupported action: %s" % action)
            self.options[name] = value
        super().enable()


class override_system_checks(TestContextDecorator):
    """
    Act as a decorator. Override list of registered system checks.
    Useful when you override `INSTALLED_APPS`, e.g. if you exclude `auth` app,
    you also need to exclude its system checks.
    """
    def __init__(self, new_checks, deployment_checks=None):
        from django.core.checks.registry import registry
        self.registry = registry
        self.new_checks = new_checks
        self.deployment_checks = deployment_checks
        super().__init__()

    def enable(self):
        self.old_checks = self.registry.registered_checks
        self.registry.registered_checks = set()
        for check in self.new_checks:
            self.registry.register(check, *getattr(check, 'tags', ()))
        self.old_deployment_checks = self.registry.deployment_checks
        if self.deployment_checks is not None:
            self.registry.deployment_checks = set()
            for check in self.deployment_checks:
                self.registry.register(check, *getattr(check, 'tags', ()), deploy=True)

    def disable(self):
        self.registry.registered_checks = self.old_checks
        self.registry.deployment_checks = self.old_deployment_checks


def compare_xml(want, got):
    """
    Try to do an 'xml-comparison' of want and got. Plain string comparison
    doesn't always work because, for example, attribute ordering should not be
    important. Ignore comment nodes, processing instructions, document type
    node, and leading and trailing whitespace.

    Based on https://github.com/lxml/lxml/blob/master/src/lxml/doctestcompare.py
    """
    _norm_whitespace_re = re.compile(r'[ \t\n][ \t\n]+')

    def norm_whitespace(v):
        return _norm_whitespace_re.sub(' ', v)

    def child_text(element):
        return ''.join(c.data for c in element.childNodes
                       if c.nodeType == Node.TEXT_NODE)

    def children(element):
        return [c for c in element.childNodes
                if c.nodeType == Node.ELEMENT_NODE]

    def norm_child_text(element):
        return norm_whitespace(child_text(element))

    def attrs_dict(element):
        return dict(element.attributes.items())

    def check_element(want_element, got_element):
        if want_element.tagName != got_element.tagName:
            return False
        if norm_child_text(want_element) != norm_child_text(got_element):
            return False
        if attrs_dict(want_element) != attrs_dict(got_element):
            return False
        want_children = children(want_element)
        got_children = children(got_element)
        if len(want_children) != len(got_children):
            return False
        return all(check_element(want, got) for want, got in zip(want_children, got_children))

    def first_node(document):
        for node in document.childNodes:
            if node.nodeType not in (
                Node.COMMENT_NODE,
                Node.DOCUMENT_TYPE_NODE,
                Node.PROCESSING_INSTRUCTION_NODE,
            ):
                return node

    want = want.strip().replace('\\n', '\n')
    got = got.strip().replace('\\n', '\n')

    # If the string is not a complete xml document, we may need to add a
    # root element.
    # This allows us to compare fragments, like "<foo/><bar/>".
    if not want.startswith('<?xml'):
        wrapper = '<root>%s</root>'
        want = wrapper % want
        got = wrapper % got

    # Parse the want and got strings, and compare the parsings.
    want_root = first_node(parseString(want))
    got_root = first_node(parseString(got))

    return check_element(want_root, got_root)


class CaptureQueriesContext:
    """
    Context manager that captures queries executed by the specified
    connection.
    """
    def __init__(self, connection):
        self.connection = connection

    def __iter__(self):
        return iter(self.captured_queries)

    def __getitem__(self, index):
        return self.captured_queries[index]

    def __len__(self):
        return len(self.captured_queries)

    @property
    def captured_queries(self):
        return self.connection.queries[self.initial_queries:self.final_queries]

    def __enter__(self):
        self.force_debug_cursor = self.connection.force_debug_cursor
        self.connection.force_debug_cursor = True
        # Run any initialization queries if needed so that they won't be
        # included as part of the count.
        self.connection.ensure_connection()
        self.initial_queries = len(self.connection.queries_log)
        self.final_queries = None
        request_started.disconnect(reset_queries)
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.connection.force_debug_cursor = self.force_debug_cursor
        request_started.connect(reset_queries)
        if exc_type is not None:
            return
        self.final_queries = len(self.connection.queries_log)


class ignore_warnings(TestContextDecorator):
    def __init__(self, **kwargs):
        self.ignore_kwargs = kwargs
        if 'message' in self.ignore_kwargs or 'module' in self.ignore_kwargs:
            self.filter_func = warnings.filterwarnings
        else:
            self.filter_func = warnings.simplefilter
        super().__init__()

    def enable(self):
        self.catch_warnings = warnings.catch_warnings()
        self.catch_warnings.__enter__()
        self.filter_func('ignore', **self.ignore_kwargs)

    def disable(self):
        self.catch_warnings.__exit__(*sys.exc_info())


# On OSes that don't provide tzset (Windows), we can't set the timezone
# in which the program runs. As a consequence, we must skip tests that
# don't enforce a specific timezone (with timezone.override or equivalent),
# or attempt to interpret naive datetimes in the default timezone.
requires_tz_support = skipUnless(
    TZ_SUPPORT,
    "This test relies on the ability to run a program in an arbitrary "
    "time zone, but your operating system isn't able to do that."
)


@contextmanager
def extend_sys_path(*paths):
    """Context manager to temporarily add paths to sys.path."""
    _orig_sys_path = sys.path[:]
    sys.path.extend(paths)
    try:
        yield
    finally:
        sys.path = _orig_sys_path


@contextmanager
def isolate_lru_cache(lru_cache_object):
    """Clear the cache of an LRU cache object on entering and exiting."""
    lru_cache_object.cache_clear()
    try:
        yield
    finally:
        lru_cache_object.cache_clear()


@contextmanager
def captured_output(stream_name):
    """Return a context manager used by captured_stdout/stdin/stderr that
    temporarily replaces the sys stream *stream_name* with a StringIO.
    Note: This function and the following ``captured_std*`` are copied
          from CPython's ``test.support`` module."""
    orig_stdout = getattr(sys, stream_name)
    setattr(sys, stream_name, StringIO())
    try:
        yield getattr(sys, stream_name)
    finally:
        setattr(sys, stream_name, orig_stdout)


def captured_stdout():
    """Capture the output of sys.stdout:

       with captured_stdout() as stdout:
           print("hello")
       self.assertEqual(stdout.getvalue(), "hello\n")
    """
    return captured_output("stdout")


def captured_stderr():
    """Capture the output of sys.stderr:

       with captured_stderr() as stderr:
           print("hello", file=sys.stderr)
       self.assertEqual(stderr.getvalue(), "hello\n")
    """
    return captured_output("stderr")


def captured_stdin():
    """Capture the input to sys.stdin:

       with captured_stdin() as stdin:
           stdin.write('hello\n')
           stdin.seek(0)
           # call test code that consumes from sys.stdin
           captured = input()
       self.assertEqual(captured, "hello")
    """
    return captured_output("stdin")


@contextmanager
def freeze_time(t):
    """
    Context manager to temporarily freeze time.time(). This temporarily
    modifies the time function of the time module. Modules which import the
    time function directly (e.g. `from time import time`) won't be affected.
    This isn't meant as a public API, but helps reduce some repetitive code in
    Django's test suite.
    """
    _real_time = time.time
    time.time = lambda: t
    try:
        yield
    finally:
        time.time = _real_time


def require_jinja2(test_func):
    """
    Decorator to enable a Jinja2 template engine in addition to the regular
    Django template engine for a test or skip it if Jinja2 isn't available.
    """
    test_func = skipIf(jinja2 is None, "this test requires jinja2")(test_func)
    return override_settings(TEMPLATES=[{
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'APP_DIRS': True,
    }, {
        'BACKEND': 'django.template.backends.jinja2.Jinja2',
        'APP_DIRS': True,
        'OPTIONS': {'keep_trailing_newline': True},
    }])(test_func)


class override_script_prefix(TestContextDecorator):
    """Decorator or context manager to temporarily override the script prefix."""
    def __init__(self, prefix):
        self.prefix = prefix
        super().__init__()

    def enable(self):
        self.old_prefix = get_script_prefix()
        set_script_prefix(self.prefix)

    def disable(self):
        set_script_prefix(self.old_prefix)


class LoggingCaptureMixin:
    """
    Capture the output from the 'django' logger and store it on the class's
    logger_output attribute.
    """
    def setUp(self):
        self.logger = logging.getLogger('django')
        self.old_stream = self.logger.handlers[0].stream
        self.logger_output = StringIO()
        self.logger.handlers[0].stream = self.logger_output

    def tearDown(self):
        self.logger.handlers[0].stream = self.old_stream


class isolate_apps(TestContextDecorator):
    """
    Act as either a decorator or a context manager to register models defined
    in its wrapped context to an isolated registry.

    The list of installed apps the isolated registry should contain must be
    passed as arguments.

    Two optional keyword arguments can be specified:

    `attr_name`: attribute assigned the isolated registry if used as a class
                 decorator.

    `kwarg_name`: keyword argument passing the isolated registry if used as a
                  function decorator.
""" def __init__(self, *installed_apps, **kwargs): self.installed_apps = installed_apps super().__init__(**kwargs) def enable(self): self.old_apps = Options.default_apps apps = Apps(self.installed_apps) setattr(Options, 'default_apps', apps) return apps def disable(self): setattr(Options, 'default_apps', self.old_apps) def tag(*tags): """Decorator to add tags to a test class or method.""" def decorator(obj): if hasattr(obj, 'tags'): obj.tags = obj.tags.union(tags) else: setattr(obj, 'tags', set(tags)) return obj return decorator @contextmanager def register_lookup(field, *lookups, lookup_name=None): """ Context manager to temporarily register lookups on a model field using lookup_name (or the lookup's lookup_name if not provided). """ try: for lookup in lookups: field.register_lookup(lookup, lookup_name) yield finally: for lookup in lookups: field._unregister_lookup(lookup, lookup_name)
65283f75d81ec97ad3cbeac0bfbf19669414cf5f68abffb960e1159b15be26d5
import os import time import warnings from asgiref.local import Local from django.apps import apps from django.core.exceptions import ImproperlyConfigured from django.core.signals import setting_changed from django.db import connections, router from django.db.utils import ConnectionRouter from django.dispatch import Signal, receiver from django.utils import timezone from django.utils.formats import FORMAT_SETTINGS, reset_format_cache from django.utils.functional import empty template_rendered = Signal() # Most setting_changed receivers are supposed to be added below, # except for cases where the receiver is related to a contrib app. # Settings that may not work well when using 'override_settings' (#19031) COMPLEX_OVERRIDE_SETTINGS = {'DATABASES'} @receiver(setting_changed) def clear_cache_handlers(**kwargs): if kwargs['setting'] == 'CACHES': from django.core.cache import caches, close_caches close_caches() caches._caches = Local() @receiver(setting_changed) def update_installed_apps(**kwargs): if kwargs['setting'] == 'INSTALLED_APPS': # Rebuild any AppDirectoriesFinder instance. from django.contrib.staticfiles.finders import get_finder get_finder.cache_clear() # Rebuild management commands cache from django.core.management import get_commands get_commands.cache_clear() # Rebuild get_app_template_dirs cache. from django.template.utils import get_app_template_dirs get_app_template_dirs.cache_clear() # Rebuild translations cache. from django.utils.translation import trans_real trans_real._translations = {} @receiver(setting_changed) def update_connections_time_zone(**kwargs): if kwargs['setting'] == 'TIME_ZONE': # Reset process time zone if hasattr(time, 'tzset'): if kwargs['value']: os.environ['TZ'] = kwargs['value'] else: os.environ.pop('TZ', None) time.tzset() # Reset local time zone cache timezone.get_default_timezone.cache_clear() # Reset the database connections' time zone if kwargs['setting'] in {'TIME_ZONE', 'USE_TZ'}: for conn in connections.all(): try: del conn.timezone except AttributeError: pass try: del conn.timezone_name except AttributeError: pass conn.ensure_timezone() @receiver(setting_changed) def clear_routers_cache(**kwargs): if kwargs['setting'] == 'DATABASE_ROUTERS': router.routers = ConnectionRouter().routers @receiver(setting_changed) def reset_template_engines(**kwargs): if kwargs['setting'] in { 'TEMPLATES', 'DEBUG', 'INSTALLED_APPS', }: from django.template import engines try: del engines.templates except AttributeError: pass engines._templates = None engines._engines = {} from django.template.engine import Engine Engine.get_default.cache_clear() from django.forms.renderers import get_default_renderer get_default_renderer.cache_clear() @receiver(setting_changed) def clear_serializers_cache(**kwargs): if kwargs['setting'] == 'SERIALIZATION_MODULES': from django.core import serializers serializers._serializers = {} @receiver(setting_changed) def language_changed(**kwargs): if kwargs['setting'] in {'LANGUAGES', 'LANGUAGE_CODE', 'LOCALE_PATHS'}: from django.utils.translation import trans_real trans_real._default = None trans_real._active = Local() if kwargs['setting'] in {'LANGUAGES', 'LOCALE_PATHS'}: from django.utils.translation import trans_real trans_real._translations = {} trans_real.check_for_language.cache_clear() @receiver(setting_changed) def localize_settings_changed(**kwargs): if kwargs['setting'] in FORMAT_SETTINGS or kwargs['setting'] == 'USE_THOUSAND_SEPARATOR': reset_format_cache() @receiver(setting_changed) def file_storage_changed(**kwargs): if 
kwargs['setting'] == 'DEFAULT_FILE_STORAGE': from django.core.files.storage import default_storage default_storage._wrapped = empty @receiver(setting_changed) def complex_setting_changed(**kwargs): if kwargs['enter'] and kwargs['setting'] in COMPLEX_OVERRIDE_SETTINGS: # Considering the current implementation of the signals framework, # this stacklevel shows the line containing the override_settings call. warnings.warn("Overriding setting %s can lead to unexpected behavior." % kwargs['setting'], stacklevel=6) @receiver(setting_changed) def root_urlconf_changed(**kwargs): if kwargs['setting'] == 'ROOT_URLCONF': from django.urls import clear_url_caches, set_urlconf clear_url_caches() set_urlconf(None) @receiver(setting_changed) def static_storage_changed(**kwargs): if kwargs['setting'] in { 'STATICFILES_STORAGE', 'STATIC_ROOT', 'STATIC_URL', }: from django.contrib.staticfiles.storage import staticfiles_storage staticfiles_storage._wrapped = empty @receiver(setting_changed) def static_finders_changed(**kwargs): if kwargs['setting'] in { 'STATICFILES_DIRS', 'STATIC_ROOT', }: from django.contrib.staticfiles.finders import get_finder get_finder.cache_clear() @receiver(setting_changed) def auth_password_validators_changed(**kwargs): if kwargs['setting'] == 'AUTH_PASSWORD_VALIDATORS': from django.contrib.auth.password_validation import get_default_password_validators get_default_password_validators.cache_clear() @receiver(setting_changed) def user_model_swapped(**kwargs): if kwargs['setting'] == 'AUTH_USER_MODEL': apps.clear_cache() try: from django.contrib.auth import get_user_model UserModel = get_user_model() except ImproperlyConfigured: # Some tests set an invalid AUTH_USER_MODEL. pass else: from django.contrib.auth import backends backends.UserModel = UserModel from django.contrib.auth import forms forms.UserModel = UserModel from django.contrib.auth.handlers import modwsgi modwsgi.UserModel = UserModel from django.contrib.auth.management.commands import changepassword changepassword.UserModel = UserModel from django.contrib.auth import views views.UserModel = UserModel
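
# ---------------------------------------------------------------------------
# Usage sketch (illustrative only, not part of Django itself): third-party
# code can hook the same setting_changed signal to invalidate its own derived
# state when override_settings touches a setting it depends on. The setting
# name used here is hypothetical.
@receiver(setting_changed)
def example_clear_derived_state(**kwargs):
    if kwargs['setting'] == 'EXAMPLE_BACKEND':
        # Reset whatever module-level cache was built from the old value;
        # kwargs also carries 'value' (the new value) and 'enter' (True when
        # the override is applied, False when it's unwound).
        pass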
76a8c867a89bf2a3d225b1b2c95ee84c042483f5a4b5f631fe4568b3dd2629b2
import os from importlib import import_module from django.core.exceptions import ImproperlyConfigured from django.utils.module_loading import module_has_submodule MODELS_MODULE_NAME = 'models' class AppConfig: """Class representing a Django application and its configuration.""" def __init__(self, app_name, app_module): # Full Python path to the application e.g. 'django.contrib.admin'. self.name = app_name # Root module for the application e.g. <module 'django.contrib.admin' # from 'django/contrib/admin/__init__.py'>. self.module = app_module # Reference to the Apps registry that holds this AppConfig. Set by the # registry when it registers the AppConfig instance. self.apps = None # The following attributes could be defined at the class level in a # subclass, hence the test-and-set pattern. # Last component of the Python path to the application e.g. 'admin'. # This value must be unique across a Django project. if not hasattr(self, 'label'): self.label = app_name.rpartition(".")[2] # Human-readable name for the application e.g. "Admin". if not hasattr(self, 'verbose_name'): self.verbose_name = self.label.title() # Filesystem path to the application directory e.g. # '/path/to/django/contrib/admin'. if not hasattr(self, 'path'): self.path = self._path_from_module(app_module) # Module containing models e.g. <module 'django.contrib.admin.models' # from 'django/contrib/admin/models.py'>. Set by import_models(). # None if the application doesn't have a models module. self.models_module = None # Mapping of lowercase model names to model classes. Initially set to # None to prevent accidental access before import_models() runs. self.models = None def __repr__(self): return '<%s: %s>' % (self.__class__.__name__, self.label) def _path_from_module(self, module): """Attempt to determine app's filesystem path from its module.""" # See #21874 for extended discussion of the behavior of this method in # various cases. # Convert paths to list because Python's _NamespacePath doesn't support # indexing. paths = list(getattr(module, '__path__', [])) if len(paths) != 1: filename = getattr(module, '__file__', None) if filename is not None: paths = [os.path.dirname(filename)] else: # For unknown reasons, sometimes the list returned by __path__ # contains duplicates that must be removed (#25246). paths = list(set(paths)) if len(paths) > 1: raise ImproperlyConfigured( "The app module %r has multiple filesystem locations (%r); " "you must configure this app with an AppConfig subclass " "with a 'path' class attribute." % (module, paths)) elif not paths: raise ImproperlyConfigured( "The app module %r has no filesystem location, " "you must configure this app with an AppConfig subclass " "with a 'path' class attribute." % module) return paths[0] @classmethod def create(cls, entry): """ Factory that creates an app config from an entry in INSTALLED_APPS. """ try: # If import_module succeeds, entry is a path to an app module, # which may specify an app config class with default_app_config. # Otherwise, entry is a path to an app config class or an error. module = import_module(entry) except ImportError: # Track that importing as an app module failed. If importing as an # app config class fails too, we'll trigger the ImportError again. module = None mod_path, _, cls_name = entry.rpartition('.') # Raise the original exception when entry cannot be a path to an # app config class. if not mod_path: raise else: try: # If this works, the app module specifies an app config class. 
entry = module.default_app_config except AttributeError: # Otherwise, it simply uses the default app config class. return cls(entry, module) else: mod_path, _, cls_name = entry.rpartition('.') # If we're reaching this point, we must attempt to load the app config # class located at <mod_path>.<cls_name> mod = import_module(mod_path) try: cls = getattr(mod, cls_name) except AttributeError: if module is None: # If importing as an app module failed, check if the module # contains any valid AppConfigs and show them as choices. # Otherwise, that error probably contains the most informative # traceback, so trigger it again. candidates = sorted( repr(name) for name, candidate in mod.__dict__.items() if isinstance(candidate, type) and issubclass(candidate, AppConfig) and candidate is not AppConfig ) if candidates: raise ImproperlyConfigured( "'%s' does not contain a class '%s'. Choices are: %s." % (mod_path, cls_name, ', '.join(candidates)) ) import_module(entry) else: raise # Check for obvious errors. (This check prevents duck typing, but # it could be removed if it became a problem in practice.) if not issubclass(cls, AppConfig): raise ImproperlyConfigured( "'%s' isn't a subclass of AppConfig." % entry) # Obtain app name here rather than in AppClass.__init__ to keep # all error checking for entries in INSTALLED_APPS in one place. try: app_name = cls.name except AttributeError: raise ImproperlyConfigured( "'%s' must supply a name attribute." % entry) # Ensure app_name points to a valid module. try: app_module = import_module(app_name) except ImportError: raise ImproperlyConfigured( "Cannot import '%s'. Check that '%s.%s.name' is correct." % ( app_name, mod_path, cls_name, ) ) # Entry is a path to an app config class. return cls(app_name, app_module) def get_model(self, model_name, require_ready=True): """ Return the model with the given case-insensitive model_name. Raise LookupError if no model exists with this name. """ if require_ready: self.apps.check_models_ready() else: self.apps.check_apps_ready() try: return self.models[model_name.lower()] except KeyError: raise LookupError( "App '%s' doesn't have a '%s' model." % (self.label, model_name)) def get_models(self, include_auto_created=False, include_swapped=False): """ Return an iterable of models. By default, the following models aren't included: - auto-created models for many-to-many relations without an explicit intermediate table, - models that have been swapped out. Set the corresponding keyword argument to True to include such models. Keyword arguments aren't documented; they're a private API. """ self.apps.check_models_ready() for model in self.models.values(): if model._meta.auto_created and not include_auto_created: continue if model._meta.swapped and not include_swapped: continue yield model def import_models(self): # Dictionary of models for this app, primarily maintained in the # 'all_models' attribute of the Apps this AppConfig is attached to. self.models = self.apps.all_models[self.label] if module_has_submodule(self.module, MODELS_MODULE_NAME): models_module_name = '%s.%s' % (self.name, MODELS_MODULE_NAME) self.models_module = import_module(models_module_name) def ready(self): """ Override this method in subclasses to run code when Django starts. """
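
# ---------------------------------------------------------------------------
# Usage sketch (illustrative only, not part of Django itself): a minimal
# AppConfig subclass as it would appear in an application's apps.py, wired up
# via INSTALLED_APPS or default_app_config. The app name is hypothetical.
class ExampleConfig(AppConfig):
    name = 'example_app'  # must match the app's Python import path
    verbose_name = 'Example application'

    def ready(self):
        # One-time start-up work (e.g. importing signal handlers) goes here;
        # Django calls ready() once the app registry is fully populated.
        pass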
import itertools import json import os import re from urllib.parse import unquote from django.apps import apps from django.conf import settings from django.http import HttpResponse, HttpResponseRedirect, JsonResponse from django.template import Context, Engine from django.urls import translate_url from django.utils.formats import get_format from django.utils.http import url_has_allowed_host_and_scheme from django.utils.translation import ( LANGUAGE_SESSION_KEY, check_for_language, get_language, ) from django.utils.translation.trans_real import DjangoTranslation from django.views.generic import View LANGUAGE_QUERY_PARAMETER = 'language' def set_language(request): """ Redirect to a given URL while setting the chosen language in the session (if enabled) and in a cookie. The URL and the language code need to be specified in the request parameters. Since this view changes how the user will see the rest of the site, it must only be accessed as a POST request. If called as a GET request, it will redirect to the page in the request (the 'next' parameter) without changing any state. """ next_url = request.POST.get('next', request.GET.get('next')) if ( (next_url or request.accepts('text/html')) and not url_has_allowed_host_and_scheme( url=next_url, allowed_hosts={request.get_host()}, require_https=request.is_secure(), ) ): next_url = request.META.get('HTTP_REFERER') # HTTP_REFERER may be encoded. next_url = next_url and unquote(next_url) if not url_has_allowed_host_and_scheme( url=next_url, allowed_hosts={request.get_host()}, require_https=request.is_secure(), ): next_url = '/' response = HttpResponseRedirect(next_url) if next_url else HttpResponse(status=204) if request.method == 'POST': lang_code = request.POST.get(LANGUAGE_QUERY_PARAMETER) if lang_code and check_for_language(lang_code): if next_url: next_trans = translate_url(next_url, lang_code) if next_trans != next_url: response = HttpResponseRedirect(next_trans) if hasattr(request, 'session'): # Storing the language in the session is deprecated. # (RemovedInDjango40Warning) request.session[LANGUAGE_SESSION_KEY] = lang_code response.set_cookie( settings.LANGUAGE_COOKIE_NAME, lang_code, max_age=settings.LANGUAGE_COOKIE_AGE, path=settings.LANGUAGE_COOKIE_PATH, domain=settings.LANGUAGE_COOKIE_DOMAIN, secure=settings.LANGUAGE_COOKIE_SECURE, httponly=settings.LANGUAGE_COOKIE_HTTPONLY, samesite=settings.LANGUAGE_COOKIE_SAMESITE, ) return response def get_formats(): """Return all formats strings required for i18n to work.""" FORMAT_SETTINGS = ( 'DATE_FORMAT', 'DATETIME_FORMAT', 'TIME_FORMAT', 'YEAR_MONTH_FORMAT', 'MONTH_DAY_FORMAT', 'SHORT_DATE_FORMAT', 'SHORT_DATETIME_FORMAT', 'FIRST_DAY_OF_WEEK', 'DECIMAL_SEPARATOR', 'THOUSAND_SEPARATOR', 'NUMBER_GROUPING', 'DATE_INPUT_FORMATS', 'TIME_INPUT_FORMATS', 'DATETIME_INPUT_FORMATS' ) return {attr: get_format(attr) for attr in FORMAT_SETTINGS} js_catalog_template = r""" {% autoescape off %} (function(globals) { var django = globals.django || (globals.django = {}); {% if plural %} django.pluralidx = function(n) { var v={{ plural }}; if (typeof(v) == 'boolean') { return v ? 1 : 0; } else { return v; } }; {% else %} django.pluralidx = function(count) { return (count == 1) ? 
0 : 1; }; {% endif %} /* gettext library */ django.catalog = django.catalog || {}; {% if catalog_str %} var newcatalog = {{ catalog_str }}; for (var key in newcatalog) { django.catalog[key] = newcatalog[key]; } {% endif %} if (!django.jsi18n_initialized) { django.gettext = function(msgid) { var value = django.catalog[msgid]; if (typeof(value) == 'undefined') { return msgid; } else { return (typeof(value) == 'string') ? value : value[0]; } }; django.ngettext = function(singular, plural, count) { var value = django.catalog[singular]; if (typeof(value) == 'undefined') { return (count == 1) ? singular : plural; } else { return value.constructor === Array ? value[django.pluralidx(count)] : value; } }; django.gettext_noop = function(msgid) { return msgid; }; django.pgettext = function(context, msgid) { var value = django.gettext(context + '\x04' + msgid); if (value.indexOf('\x04') != -1) { value = msgid; } return value; }; django.npgettext = function(context, singular, plural, count) { var value = django.ngettext(context + '\x04' + singular, context + '\x04' + plural, count); if (value.indexOf('\x04') != -1) { value = django.ngettext(singular, plural, count); } return value; }; django.interpolate = function(fmt, obj, named) { if (named) { return fmt.replace(/%\(\w+\)s/g, function(match){return String(obj[match.slice(2,-2)])}); } else { return fmt.replace(/%s/g, function(match){return String(obj.shift())}); } }; /* formatting library */ django.formats = {{ formats_str }}; django.get_format = function(format_type) { var value = django.formats[format_type]; if (typeof(value) == 'undefined') { return format_type; } else { return value; } }; /* add to global namespace */ globals.pluralidx = django.pluralidx; globals.gettext = django.gettext; globals.ngettext = django.ngettext; globals.gettext_noop = django.gettext_noop; globals.pgettext = django.pgettext; globals.npgettext = django.npgettext; globals.interpolate = django.interpolate; globals.get_format = django.get_format; django.jsi18n_initialized = true; } }(this)); {% endautoescape %} """ class JavaScriptCatalog(View): """ Return the selected language catalog as a JavaScript library. Receive the list of packages to check for translations in the `packages` kwarg either from the extra dictionary passed to the path() function or as a plus-sign delimited string from the request. Default is 'django.conf'. You can override the gettext domain for this view, but usually you don't want to do that as JavaScript messages go to the djangojs domain. This might be needed if you deliver your JavaScript source from Django templates. """ domain = 'djangojs' packages = None def get(self, request, *args, **kwargs): locale = get_language() domain = kwargs.get('domain', self.domain) # If packages are not provided, default to all installed packages, as # DjangoTranslation without localedirs harvests them all. 
packages = kwargs.get('packages', '') packages = packages.split('+') if packages else self.packages paths = self.get_paths(packages) if packages else None self.translation = DjangoTranslation(locale, domain=domain, localedirs=paths) context = self.get_context_data(**kwargs) return self.render_to_response(context) def get_paths(self, packages): allowable_packages = {app_config.name: app_config for app_config in apps.get_app_configs()} app_configs = [allowable_packages[p] for p in packages if p in allowable_packages] if len(app_configs) < len(packages): excluded = [p for p in packages if p not in allowable_packages] raise ValueError( 'Invalid package(s) provided to JavaScriptCatalog: %s' % ','.join(excluded) ) # paths of requested packages return [os.path.join(app.path, 'locale') for app in app_configs] @property def _num_plurals(self): """ Return the number of plurals for this catalog language, or 2 if no plural string is available. """ match = re.search(r'nplurals=\s*(\d+)', self._plural_string or '') if match: return int(match.groups()[0]) return 2 @property def _plural_string(self): """ Return the plural string (including nplurals) for this catalog language, or None if no plural string is available. """ if '' in self.translation._catalog: for line in self.translation._catalog[''].split('\n'): if line.startswith('Plural-Forms:'): return line.split(':', 1)[1].strip() return None def get_plural(self): plural = self._plural_string if plural is not None: # This should be a compiled function of a typical plural-form: # Plural-Forms: nplurals=3; plural=n%10==1 && n%100!=11 ? 0 : # n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2; plural = [el.strip() for el in plural.split(';') if el.strip().startswith('plural=')][0].split('=', 1)[1] return plural def get_catalog(self): pdict = {} num_plurals = self._num_plurals catalog = {} trans_cat = self.translation._catalog trans_fallback_cat = self.translation._fallback._catalog if self.translation._fallback else {} seen_keys = set() for key, value in itertools.chain(trans_cat.items(), trans_fallback_cat.items()): if key == '' or key in seen_keys: continue if isinstance(key, str): catalog[key] = value elif isinstance(key, tuple): msgid, cnt = key pdict.setdefault(msgid, {})[cnt] = value else: raise TypeError(key) seen_keys.add(key) for k, v in pdict.items(): catalog[k] = [v.get(i, '') for i in range(num_plurals)] return catalog def get_context_data(self, **kwargs): return { 'catalog': self.get_catalog(), 'formats': get_formats(), 'plural': self.get_plural(), } def render_to_response(self, context, **response_kwargs): def indent(s): return s.replace('\n', '\n ') template = Engine().from_string(js_catalog_template) context['catalog_str'] = indent( json.dumps(context['catalog'], sort_keys=True, indent=2) ) if context['catalog'] else None context['formats_str'] = indent(json.dumps(context['formats'], sort_keys=True, indent=2)) return HttpResponse(template.render(Context(context)), 'text/javascript; charset="utf-8"') class JSONCatalog(JavaScriptCatalog): """ Return the selected language catalog as a JSON object. Receive the same parameters as JavaScriptCatalog and return a response with a JSON object of the following format: { "catalog": { # Translations catalog }, "formats": { # Language formats for date, time, etc. }, "plural": '...' # Expression for plural forms, or null. } """ def render_to_response(self, context, **response_kwargs): return JsonResponse(context)
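
# Illustrative sketch (not part of Django): wiring the catalog views into a
# project URLconf. The 'your_app' package name is a placeholder.
#
#     from django.urls import path
#     from django.views.i18n import JavaScriptCatalog, JSONCatalog
#
#     urlpatterns = [
#         path('jsi18n/', JavaScriptCatalog.as_view(packages=['your_app']),
#              name='javascript-catalog'),
#         path('i18n.json', JSONCatalog.as_view(), name='json-catalog'),
#     ]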
import functools
import re
import sys
import types
from pathlib import Path

from django.conf import settings
from django.http import Http404, HttpResponse, HttpResponseNotFound
from django.template import Context, Engine, TemplateDoesNotExist
from django.template.defaultfilters import pprint
from django.urls import resolve
from django.utils import timezone
from django.utils.datastructures import MultiValueDict
from django.utils.encoding import force_str
from django.utils.module_loading import import_string
from django.utils.regex_helper import _lazy_re_compile
from django.utils.version import get_docs_version

# Minimal Django templates engine to render the error templates
# regardless of the project's TEMPLATES setting. Templates are
# read directly from the filesystem so that the error handler
# works even if the template loader is broken.
DEBUG_ENGINE = Engine(
    debug=True,
    libraries={'i18n': 'django.templatetags.i18n'},
)

CURRENT_DIR = Path(__file__).parent


class CallableSettingWrapper:
    """
    Object to wrap callables appearing in settings.
    * Not to call in the debug page (#21345).
    * Not to break the debug page if the callable forbids setting attributes
      (#23070).
    """
    def __init__(self, callable_setting):
        self._wrapped = callable_setting

    def __repr__(self):
        return repr(self._wrapped)


def technical_500_response(request, exc_type, exc_value, tb, status_code=500):
    """
    Create a technical server error response. The last three arguments are
    the values returned from sys.exc_info() and friends.
    """
    reporter = get_exception_reporter_class(request)(request, exc_type, exc_value, tb)
    if request.accepts('text/html'):
        html = reporter.get_traceback_html()
        return HttpResponse(html, status=status_code, content_type='text/html')
    else:
        text = reporter.get_traceback_text()
        return HttpResponse(text, status=status_code, content_type='text/plain; charset=utf-8')


@functools.lru_cache()
def get_default_exception_reporter_filter():
    # Instantiate the default filter for the first time and cache it.
    return import_string(settings.DEFAULT_EXCEPTION_REPORTER_FILTER)()


def get_exception_reporter_filter(request):
    default_filter = get_default_exception_reporter_filter()
    return getattr(request, 'exception_reporter_filter', default_filter)


def get_exception_reporter_class(request):
    default_exception_reporter_class = import_string(settings.DEFAULT_EXCEPTION_REPORTER)
    return getattr(request, 'exception_reporter_class', default_exception_reporter_class)


class SafeExceptionReporterFilter:
    """
    Use annotations made by the sensitive_post_parameters and
    sensitive_variables decorators to filter out sensitive information.
    """
    cleansed_substitute = '********************'
    hidden_settings = _lazy_re_compile('API|TOKEN|KEY|SECRET|PASS|SIGNATURE', flags=re.I)

    def cleanse_setting(self, key, value):
        """
        Cleanse an individual setting key/value of sensitive content. If the
        value is a dictionary, recursively cleanse the keys in that dictionary.
        """
        try:
            if self.hidden_settings.search(key):
                cleansed = self.cleansed_substitute
            elif isinstance(value, dict):
                cleansed = {k: self.cleanse_setting(k, v) for k, v in value.items()}
            elif isinstance(value, list):
                cleansed = [self.cleanse_setting('', v) for v in value]
            elif isinstance(value, tuple):
                cleansed = tuple([self.cleanse_setting('', v) for v in value])
            else:
                cleansed = value
        except TypeError:
            # If the key isn't regex-able, just return as-is.
cleansed = value if callable(cleansed): cleansed = CallableSettingWrapper(cleansed) return cleansed def get_safe_settings(self): """ Return a dictionary of the settings module with values of sensitive settings replaced with stars (*********). """ settings_dict = {} for k in dir(settings): if k.isupper(): settings_dict[k] = self.cleanse_setting(k, getattr(settings, k)) return settings_dict def get_safe_request_meta(self, request): """ Return a dictionary of request.META with sensitive values redacted. """ if not hasattr(request, 'META'): return {} return {k: self.cleanse_setting(k, v) for k, v in request.META.items()} def is_active(self, request): """ This filter is to add safety in production environments (i.e. DEBUG is False). If DEBUG is True then your site is not safe anyway. This hook is provided as a convenience to easily activate or deactivate the filter on a per request basis. """ return settings.DEBUG is False def get_cleansed_multivaluedict(self, request, multivaluedict): """ Replace the keys in a MultiValueDict marked as sensitive with stars. This mitigates leaking sensitive POST parameters if something like request.POST['nonexistent_key'] throws an exception (#21098). """ sensitive_post_parameters = getattr(request, 'sensitive_post_parameters', []) if self.is_active(request) and sensitive_post_parameters: multivaluedict = multivaluedict.copy() for param in sensitive_post_parameters: if param in multivaluedict: multivaluedict[param] = self.cleansed_substitute return multivaluedict def get_post_parameters(self, request): """ Replace the values of POST parameters marked as sensitive with stars (*********). """ if request is None: return {} else: sensitive_post_parameters = getattr(request, 'sensitive_post_parameters', []) if self.is_active(request) and sensitive_post_parameters: cleansed = request.POST.copy() if sensitive_post_parameters == '__ALL__': # Cleanse all parameters. for k in cleansed: cleansed[k] = self.cleansed_substitute return cleansed else: # Cleanse only the specified parameters. for param in sensitive_post_parameters: if param in cleansed: cleansed[param] = self.cleansed_substitute return cleansed else: return request.POST def cleanse_special_types(self, request, value): try: # If value is lazy or a complex object of another kind, this check # might raise an exception. isinstance checks that lazy # MultiValueDicts will have a return value. is_multivalue_dict = isinstance(value, MultiValueDict) except Exception as e: return '{!r} while evaluating {!r}'.format(e, value) if is_multivalue_dict: # Cleanse MultiValueDicts (request.POST is the one we usually care about) value = self.get_cleansed_multivaluedict(request, value) return value def get_traceback_frame_variables(self, request, tb_frame): """ Replace the values of variables marked as sensitive with stars (*********). """ # Loop through the frame's callers to see if the sensitive_variables # decorator was used. current_frame = tb_frame.f_back sensitive_variables = None while current_frame is not None: if (current_frame.f_code.co_name == 'sensitive_variables_wrapper' and 'sensitive_variables_wrapper' in current_frame.f_locals): # The sensitive_variables decorator was used, so we take note # of the sensitive variables' names. 
wrapper = current_frame.f_locals['sensitive_variables_wrapper'] sensitive_variables = getattr(wrapper, 'sensitive_variables', None) break current_frame = current_frame.f_back cleansed = {} if self.is_active(request) and sensitive_variables: if sensitive_variables == '__ALL__': # Cleanse all variables for name in tb_frame.f_locals: cleansed[name] = self.cleansed_substitute else: # Cleanse specified variables for name, value in tb_frame.f_locals.items(): if name in sensitive_variables: value = self.cleansed_substitute else: value = self.cleanse_special_types(request, value) cleansed[name] = value else: # Potentially cleanse the request and any MultiValueDicts if they # are one of the frame variables. for name, value in tb_frame.f_locals.items(): cleansed[name] = self.cleanse_special_types(request, value) if (tb_frame.f_code.co_name == 'sensitive_variables_wrapper' and 'sensitive_variables_wrapper' in tb_frame.f_locals): # For good measure, obfuscate the decorated function's arguments in # the sensitive_variables decorator's frame, in case the variables # associated with those arguments were meant to be obfuscated from # the decorated function's frame. cleansed['func_args'] = self.cleansed_substitute cleansed['func_kwargs'] = self.cleansed_substitute return cleansed.items() class ExceptionReporter: """Organize and coordinate reporting on exceptions.""" def __init__(self, request, exc_type, exc_value, tb, is_email=False): self.request = request self.filter = get_exception_reporter_filter(self.request) self.exc_type = exc_type self.exc_value = exc_value self.tb = tb self.is_email = is_email self.template_info = getattr(self.exc_value, 'template_debug', None) self.template_does_not_exist = False self.postmortem = None def get_traceback_data(self): """Return a dictionary containing traceback information.""" if self.exc_type and issubclass(self.exc_type, TemplateDoesNotExist): self.template_does_not_exist = True self.postmortem = self.exc_value.chain or [self.exc_value] frames = self.get_traceback_frames() for i, frame in enumerate(frames): if 'vars' in frame: frame_vars = [] for k, v in frame['vars']: v = pprint(v) # Trim large blobs of data if len(v) > 4096: v = '%s… <trimmed %d bytes string>' % (v[0:4096], len(v)) frame_vars.append((k, v)) frame['vars'] = frame_vars frames[i] = frame unicode_hint = '' if self.exc_type and issubclass(self.exc_type, UnicodeError): start = getattr(self.exc_value, 'start', None) end = getattr(self.exc_value, 'end', None) if start is not None and end is not None: unicode_str = self.exc_value.args[1] unicode_hint = force_str( unicode_str[max(start - 5, 0):min(end + 5, len(unicode_str))], 'ascii', errors='replace' ) from django import get_version if self.request is None: user_str = None else: try: user_str = str(self.request.user) except Exception: # request.user may raise OperationalError if the database is # unavailable, for example. 
user_str = '[unable to retrieve the current user]' c = { 'is_email': self.is_email, 'unicode_hint': unicode_hint, 'frames': frames, 'request': self.request, 'request_meta': self.filter.get_safe_request_meta(self.request), 'user_str': user_str, 'filtered_POST_items': list(self.filter.get_post_parameters(self.request).items()), 'settings': self.filter.get_safe_settings(), 'sys_executable': sys.executable, 'sys_version_info': '%d.%d.%d' % sys.version_info[0:3], 'server_time': timezone.now(), 'django_version_info': get_version(), 'sys_path': sys.path, 'template_info': self.template_info, 'template_does_not_exist': self.template_does_not_exist, 'postmortem': self.postmortem, } if self.request is not None: c['request_GET_items'] = self.request.GET.items() c['request_FILES_items'] = self.request.FILES.items() c['request_COOKIES_items'] = self.request.COOKIES.items() # Check whether exception info is available if self.exc_type: c['exception_type'] = self.exc_type.__name__ if self.exc_value: c['exception_value'] = str(self.exc_value) if frames: c['lastframe'] = frames[-1] return c def get_traceback_html(self): """Return HTML version of debug 500 HTTP error page.""" with Path(CURRENT_DIR, 'templates', 'technical_500.html').open(encoding='utf-8') as fh: t = DEBUG_ENGINE.from_string(fh.read()) c = Context(self.get_traceback_data(), use_l10n=False) return t.render(c) def get_traceback_text(self): """Return plain text version of debug 500 HTTP error page.""" with Path(CURRENT_DIR, 'templates', 'technical_500.txt').open(encoding='utf-8') as fh: t = DEBUG_ENGINE.from_string(fh.read()) c = Context(self.get_traceback_data(), autoescape=False, use_l10n=False) return t.render(c) def _get_source(self, filename, loader, module_name): source = None if hasattr(loader, 'get_source'): try: source = loader.get_source(module_name) except ImportError: pass if source is not None: source = source.splitlines() if source is None: try: with open(filename, 'rb') as fp: source = fp.read().splitlines() except OSError: pass return source def _get_lines_from_file(self, filename, lineno, context_lines, loader=None, module_name=None): """ Return context_lines before and after lineno from file. Return (pre_context_lineno, pre_context, context_line, post_context). """ source = self._get_source(filename, loader, module_name) if source is None: return None, [], None, [] # If we just read the source from a file, or if the loader did not # apply tokenize.detect_encoding to decode the source into a # string, then we should do that ourselves. if isinstance(source[0], bytes): encoding = 'ascii' for line in source[:2]: # File coding may be specified. 
Match pattern from PEP-263 # (https://www.python.org/dev/peps/pep-0263/) match = re.search(br'coding[:=]\s*([-\w.]+)', line) if match: encoding = match.group(1).decode('ascii') break source = [str(sline, encoding, 'replace') for sline in source] lower_bound = max(0, lineno - context_lines) upper_bound = lineno + context_lines try: pre_context = source[lower_bound:lineno] context_line = source[lineno] post_context = source[lineno + 1:upper_bound] except IndexError: return None, [], None, [] return lower_bound, pre_context, context_line, post_context def get_traceback_frames(self): def explicit_or_implicit_cause(exc_value): explicit = getattr(exc_value, '__cause__', None) implicit = getattr(exc_value, '__context__', None) return explicit or implicit # Get the exception and all its causes exceptions = [] exc_value = self.exc_value while exc_value: exceptions.append(exc_value) exc_value = explicit_or_implicit_cause(exc_value) if exc_value in exceptions: # Avoid infinite loop if there's a cyclic reference (#29393). break frames = [] # No exceptions were supplied to ExceptionReporter if not exceptions: return frames # In case there's just one exception, take the traceback from self.tb exc_value = exceptions.pop() tb = self.tb if not exceptions else exc_value.__traceback__ while tb is not None: # Support for __traceback_hide__ which is used by a few libraries # to hide internal frames. if tb.tb_frame.f_locals.get('__traceback_hide__'): tb = tb.tb_next continue filename = tb.tb_frame.f_code.co_filename function = tb.tb_frame.f_code.co_name lineno = tb.tb_lineno - 1 loader = tb.tb_frame.f_globals.get('__loader__') module_name = tb.tb_frame.f_globals.get('__name__') or '' pre_context_lineno, pre_context, context_line, post_context = self._get_lines_from_file( filename, lineno, 7, loader, module_name, ) if pre_context_lineno is None: pre_context_lineno = lineno pre_context = [] context_line = '<source code not available>' post_context = [] frames.append({ 'exc_cause': explicit_or_implicit_cause(exc_value), 'exc_cause_explicit': getattr(exc_value, '__cause__', True), 'tb': tb, 'type': 'django' if module_name.startswith('django.') else 'user', 'filename': filename, 'function': function, 'lineno': lineno + 1, 'vars': self.filter.get_traceback_frame_variables(self.request, tb.tb_frame), 'id': id(tb), 'pre_context': pre_context, 'context_line': context_line, 'post_context': post_context, 'pre_context_lineno': pre_context_lineno + 1, }) # If the traceback for current exception is consumed, try the # other exception. if not tb.tb_next and exceptions: exc_value = exceptions.pop() tb = exc_value.__traceback__ else: tb = tb.tb_next return frames def technical_404_response(request, exception): """Create a technical 404 error response. 
`exception` is the Http404.""" try: error_url = exception.args[0]['path'] except (IndexError, TypeError, KeyError): error_url = request.path_info[1:] # Trim leading slash try: tried = exception.args[0]['tried'] except (IndexError, TypeError, KeyError): tried = [] else: if (not tried or ( # empty URLconf request.path == '/' and len(tried) == 1 and # default URLconf len(tried[0]) == 1 and getattr(tried[0][0], 'app_name', '') == getattr(tried[0][0], 'namespace', '') == 'admin' )): return default_urlconf(request) urlconf = getattr(request, 'urlconf', settings.ROOT_URLCONF) if isinstance(urlconf, types.ModuleType): urlconf = urlconf.__name__ caller = '' try: resolver_match = resolve(request.path) except Http404: pass else: obj = resolver_match.func if hasattr(obj, '__name__'): caller = obj.__name__ elif hasattr(obj, '__class__') and hasattr(obj.__class__, '__name__'): caller = obj.__class__.__name__ if hasattr(obj, '__module__'): module = obj.__module__ caller = '%s.%s' % (module, caller) with Path(CURRENT_DIR, 'templates', 'technical_404.html').open(encoding='utf-8') as fh: t = DEBUG_ENGINE.from_string(fh.read()) reporter_filter = get_default_exception_reporter_filter() c = Context({ 'urlconf': urlconf, 'root_urlconf': settings.ROOT_URLCONF, 'request_path': error_url, 'urlpatterns': tried, 'reason': str(exception), 'request': request, 'settings': reporter_filter.get_safe_settings(), 'raising_view_name': caller, }) return HttpResponseNotFound(t.render(c), content_type='text/html') def default_urlconf(request): """Create an empty URLconf 404 error response.""" with Path(CURRENT_DIR, 'templates', 'default_urlconf.html').open(encoding='utf-8') as fh: t = DEBUG_ENGINE.from_string(fh.read()) c = Context({ 'version': get_docs_version(), }) return HttpResponse(t.render(c), content_type='text/html')
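
# Illustrative sketch (not part of Django): ExceptionReporter can also be
# driven outside the request/response cycle, e.g. from a shell; request=None
# is supported by get_traceback_data() above.
#
# >>> import sys
# >>> from django.views.debug import ExceptionReporter
# >>> try:
# ...     1 / 0
# ... except ZeroDivisionError:
# ...     reporter = ExceptionReporter(None, *sys.exc_info())
# ...     text = reporter.get_traceback_text()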
""" Default Django settings. Override these with settings in the module pointed to by the DJANGO_SETTINGS_MODULE environment variable. """ # This is defined here as a do-nothing function because we can't import # django.utils.translation -- that module depends on the settings. def gettext_noop(s): return s #################### # CORE # #################### DEBUG = False # Whether the framework should propagate raw exceptions rather than catching # them. This is useful under some testing situations and should never be used # on a live site. DEBUG_PROPAGATE_EXCEPTIONS = False # People who get code error notifications. # In the format [('Full Name', '[email protected]'), ('Full Name', '[email protected]')] ADMINS = [] # List of IP addresses, as strings, that: # * See debug comments, when DEBUG is true # * Receive x-headers INTERNAL_IPS = [] # Hosts/domain names that are valid for this site. # "*" matches anything, ".example.com" matches example.com and all subdomains ALLOWED_HOSTS = [] # Local time zone for this installation. All choices can be found here: # https://en.wikipedia.org/wiki/List_of_tz_zones_by_name (although not all # systems may support all possibilities). When USE_TZ is True, this is # interpreted as the default user time zone. TIME_ZONE = 'America/Chicago' # If you set this to True, Django will use timezone-aware datetimes. USE_TZ = False # Language code for this installation. All choices can be found here: # http://www.i18nguy.com/unicode/language-identifiers.html LANGUAGE_CODE = 'en-us' # Languages we provide translations for, out of the box. LANGUAGES = [ ('af', gettext_noop('Afrikaans')), ('ar', gettext_noop('Arabic')), ('ar-dz', gettext_noop('Algerian Arabic')), ('ast', gettext_noop('Asturian')), ('az', gettext_noop('Azerbaijani')), ('bg', gettext_noop('Bulgarian')), ('be', gettext_noop('Belarusian')), ('bn', gettext_noop('Bengali')), ('br', gettext_noop('Breton')), ('bs', gettext_noop('Bosnian')), ('ca', gettext_noop('Catalan')), ('cs', gettext_noop('Czech')), ('cy', gettext_noop('Welsh')), ('da', gettext_noop('Danish')), ('de', gettext_noop('German')), ('dsb', gettext_noop('Lower Sorbian')), ('el', gettext_noop('Greek')), ('en', gettext_noop('English')), ('en-au', gettext_noop('Australian English')), ('en-gb', gettext_noop('British English')), ('eo', gettext_noop('Esperanto')), ('es', gettext_noop('Spanish')), ('es-ar', gettext_noop('Argentinian Spanish')), ('es-co', gettext_noop('Colombian Spanish')), ('es-mx', gettext_noop('Mexican Spanish')), ('es-ni', gettext_noop('Nicaraguan Spanish')), ('es-ve', gettext_noop('Venezuelan Spanish')), ('et', gettext_noop('Estonian')), ('eu', gettext_noop('Basque')), ('fa', gettext_noop('Persian')), ('fi', gettext_noop('Finnish')), ('fr', gettext_noop('French')), ('fy', gettext_noop('Frisian')), ('ga', gettext_noop('Irish')), ('gd', gettext_noop('Scottish Gaelic')), ('gl', gettext_noop('Galician')), ('he', gettext_noop('Hebrew')), ('hi', gettext_noop('Hindi')), ('hr', gettext_noop('Croatian')), ('hsb', gettext_noop('Upper Sorbian')), ('hu', gettext_noop('Hungarian')), ('hy', gettext_noop('Armenian')), ('ia', gettext_noop('Interlingua')), ('id', gettext_noop('Indonesian')), ('io', gettext_noop('Ido')), ('is', gettext_noop('Icelandic')), ('it', gettext_noop('Italian')), ('ja', gettext_noop('Japanese')), ('ka', gettext_noop('Georgian')), ('kab', gettext_noop('Kabyle')), ('kk', gettext_noop('Kazakh')), ('km', gettext_noop('Khmer')), ('kn', gettext_noop('Kannada')), ('ko', gettext_noop('Korean')), ('lb', gettext_noop('Luxembourgish')), 
    ('lt', gettext_noop('Lithuanian')),
    ('lv', gettext_noop('Latvian')),
    ('mk', gettext_noop('Macedonian')),
    ('ml', gettext_noop('Malayalam')),
    ('mn', gettext_noop('Mongolian')),
    ('mr', gettext_noop('Marathi')),
    ('my', gettext_noop('Burmese')),
    ('nb', gettext_noop('Norwegian Bokmål')),
    ('ne', gettext_noop('Nepali')),
    ('nl', gettext_noop('Dutch')),
    ('nn', gettext_noop('Norwegian Nynorsk')),
    ('os', gettext_noop('Ossetic')),
    ('pa', gettext_noop('Punjabi')),
    ('pl', gettext_noop('Polish')),
    ('pt', gettext_noop('Portuguese')),
    ('pt-br', gettext_noop('Brazilian Portuguese')),
    ('ro', gettext_noop('Romanian')),
    ('ru', gettext_noop('Russian')),
    ('sk', gettext_noop('Slovak')),
    ('sl', gettext_noop('Slovenian')),
    ('sq', gettext_noop('Albanian')),
    ('sr', gettext_noop('Serbian')),
    ('sr-latn', gettext_noop('Serbian Latin')),
    ('sv', gettext_noop('Swedish')),
    ('sw', gettext_noop('Swahili')),
    ('ta', gettext_noop('Tamil')),
    ('te', gettext_noop('Telugu')),
    ('th', gettext_noop('Thai')),
    ('tr', gettext_noop('Turkish')),
    ('tt', gettext_noop('Tatar')),
    ('udm', gettext_noop('Udmurt')),
    ('uk', gettext_noop('Ukrainian')),
    ('ur', gettext_noop('Urdu')),
    ('uz', gettext_noop('Uzbek')),
    ('vi', gettext_noop('Vietnamese')),
    ('zh-hans', gettext_noop('Simplified Chinese')),
    ('zh-hant', gettext_noop('Traditional Chinese')),
]

# Languages using BiDi (right-to-left) layout
LANGUAGES_BIDI = ["he", "ar", "ar-dz", "fa", "ur"]

# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
LOCALE_PATHS = []

# Settings for language cookie
LANGUAGE_COOKIE_NAME = 'django_language'
LANGUAGE_COOKIE_AGE = None
LANGUAGE_COOKIE_DOMAIN = None
LANGUAGE_COOKIE_PATH = '/'
LANGUAGE_COOKIE_SECURE = False
LANGUAGE_COOKIE_HTTPONLY = False
LANGUAGE_COOKIE_SAMESITE = None

# If you set this to True, Django will format dates, numbers and calendars
# according to the user's current locale.
USE_L10N = False

# Not-necessarily-technical managers of the site. They get broken link
# notifications and other various emails.
MANAGERS = ADMINS

# Default charset to use for all HttpResponse objects, if a MIME type isn't
# manually specified. It's used to construct the Content-Type header.
DEFAULT_CHARSET = 'utf-8'

# Email address that error messages come from.
SERVER_EMAIL = 'root@localhost'

# Database connection info. If left empty, will default to the dummy backend.
DATABASES = {}

# Classes used to implement DB routing behavior.
DATABASE_ROUTERS = []

# The email backend to use. For possible shortcuts see django.core.mail.
# The default is to use the SMTP backend.
# Third-party backends can be specified by providing a Python path
# to a module that defines an EmailBackend class.
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'

# Host for sending email.
EMAIL_HOST = 'localhost'

# Port for sending email.
EMAIL_PORT = 25

# Whether to send SMTP 'Date' header in the local time zone or in UTC.
EMAIL_USE_LOCALTIME = False

# Optional SMTP authentication information for EMAIL_HOST.
EMAIL_HOST_USER = ''
EMAIL_HOST_PASSWORD = ''
EMAIL_USE_TLS = False
EMAIL_USE_SSL = False
EMAIL_SSL_CERTFILE = None
EMAIL_SSL_KEYFILE = None
EMAIL_TIMEOUT = None

# List of strings representing installed apps.
INSTALLED_APPS = []

TEMPLATES = []

# Default form rendering class.
FORM_RENDERER = 'django.forms.renderers.DjangoTemplates'

# Default email address to use for various automated correspondence from
# the site managers.
DEFAULT_FROM_EMAIL = 'webmaster@localhost'

# Subject-line prefix for email messages sent with django.core.mail.mail_admins
# or ...mail_managers. Make sure to include the trailing space.
EMAIL_SUBJECT_PREFIX = '[Django] '

# Whether to append trailing slashes to URLs.
APPEND_SLASH = True

# Whether to prepend the "www." subdomain to URLs that don't have it.
PREPEND_WWW = False

# Override the server-derived value of SCRIPT_NAME
FORCE_SCRIPT_NAME = None

# List of compiled regular expression objects representing User-Agent strings
# that are not allowed to visit any page, systemwide. Use this for bad
# robots/crawlers. Here are a few examples:
#     import re
#     DISALLOWED_USER_AGENTS = [
#         re.compile(r'^NaverBot.*'),
#         re.compile(r'^EmailSiphon.*'),
#         re.compile(r'^SiteSucker.*'),
#         re.compile(r'^sohu-search'),
#     ]
DISALLOWED_USER_AGENTS = []

ABSOLUTE_URL_OVERRIDES = {}

# List of compiled regular expression objects representing URLs that need not
# be reported by BrokenLinkEmailsMiddleware. Here are a few examples:
#     import re
#     IGNORABLE_404_URLS = [
#         re.compile(r'^/apple-touch-icon.*\.png$'),
#         re.compile(r'^/favicon.ico$'),
#         re.compile(r'^/robots.txt$'),
#         re.compile(r'^/phpmyadmin/'),
#         re.compile(r'\.(cgi|php|pl)$'),
#     ]
IGNORABLE_404_URLS = []

# A secret key for this particular Django installation. Used in secret-key
# hashing algorithms. Set this in your settings, or Django will complain
# loudly.
SECRET_KEY = ''

# Default file storage mechanism that holds media.
DEFAULT_FILE_STORAGE = 'django.core.files.storage.FileSystemStorage'

# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''

# URL that handles the media served from MEDIA_ROOT.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''

# Absolute path to the directory static files should be collected to.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = None

# URL that handles the static files served from STATIC_ROOT.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = None

# List of upload handler classes to be applied in order.
FILE_UPLOAD_HANDLERS = [
    'django.core.files.uploadhandler.MemoryFileUploadHandler',
    'django.core.files.uploadhandler.TemporaryFileUploadHandler',
]

# Maximum size, in bytes, of a request before it will be streamed to the
# file system instead of into memory.
FILE_UPLOAD_MAX_MEMORY_SIZE = 2621440  # i.e. 2.5 MB

# Maximum size in bytes of request data (excluding file uploads) that will be
# read before a SuspiciousOperation (RequestDataTooBig) is raised.
DATA_UPLOAD_MAX_MEMORY_SIZE = 2621440  # i.e. 2.5 MB

# Maximum number of GET/POST parameters that will be read before a
# SuspiciousOperation (TooManyFieldsSent) is raised.
DATA_UPLOAD_MAX_NUMBER_FIELDS = 1000

# Directory in which upload streamed files will be temporarily saved. A value of
# `None` will make Django use the operating system's default temporary directory
# (i.e. "/tmp" on *nix systems).
FILE_UPLOAD_TEMP_DIR = None

# The numeric mode to set newly-uploaded files to. The value should be a mode
# you'd pass directly to os.chmod; see https://docs.python.org/library/os.html#files-and-directories.
FILE_UPLOAD_PERMISSIONS = 0o644

# The numeric mode to assign to newly-created directories, when uploading files.
# The value should be a mode as you'd pass to os.chmod;
# see https://docs.python.org/library/os.html#files-and-directories.
FILE_UPLOAD_DIRECTORY_PERMISSIONS = None

# Python module path where the user will place custom format definitions.
# The directory where this setting is pointing should contain subdirectories
# named as the locales, containing a formats.py file
# (e.g. "myproject.locale" to use myproject/locale/en/formats.py).
FORMAT_MODULE_PATH = None

# Default formatting for date objects. See all available format strings here:
# https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'N j, Y'

# Default formatting for datetime objects. See all available format strings here:
# https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATETIME_FORMAT = 'N j, Y, P'

# Default formatting for time objects. See all available format strings here:
# https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
TIME_FORMAT = 'P'

# Default formatting for date objects when only the year and month are relevant.
# See all available format strings here:
# https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
YEAR_MONTH_FORMAT = 'F Y'

# Default formatting for date objects when only the month and day are relevant.
# See all available format strings here:
# https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
MONTH_DAY_FORMAT = 'F j'

# Default short formatting for date objects. See all available format strings here:
# https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
SHORT_DATE_FORMAT = 'm/d/Y'

# Default short formatting for datetime objects.
# See all available format strings here:
# https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
SHORT_DATETIME_FORMAT = 'm/d/Y P'

# Default formats to be used when parsing dates from input boxes, in order.
# See all available format strings here:
# https://docs.python.org/library/datetime.html#strftime-behavior
# * Note that these format strings are different from the ones used to
#   display dates.
DATE_INPUT_FORMATS = [
    '%Y-%m-%d', '%m/%d/%Y', '%m/%d/%y',  # '2006-10-25', '10/25/2006', '10/25/06'
    '%b %d %Y', '%b %d, %Y',             # 'Oct 25 2006', 'Oct 25, 2006'
    '%d %b %Y', '%d %b, %Y',             # '25 Oct 2006', '25 Oct, 2006'
    '%B %d %Y', '%B %d, %Y',             # 'October 25 2006', 'October 25, 2006'
    '%d %B %Y', '%d %B, %Y',             # '25 October 2006', '25 October, 2006'
]

# Default formats to be used when parsing times from input boxes, in order.
# See all available format strings here:
# https://docs.python.org/library/datetime.html#strftime-behavior
# * Note that these format strings are different from the ones used to
#   display dates.
TIME_INPUT_FORMATS = [
    '%H:%M:%S',     # '14:30:59'
    '%H:%M:%S.%f',  # '14:30:59.000200'
    '%H:%M',        # '14:30'
]

# Default formats to be used when parsing dates and times from input boxes,
# in order.
# See all available format strings here:
# https://docs.python.org/library/datetime.html#strftime-behavior
# * Note that these format strings are different from the ones used to
#   display dates.
DATETIME_INPUT_FORMATS = [
    '%Y-%m-%d %H:%M:%S',     # '2006-10-25 14:30:59'
    '%Y-%m-%d %H:%M:%S.%f',  # '2006-10-25 14:30:59.000200'
    '%Y-%m-%d %H:%M',        # '2006-10-25 14:30'
    '%m/%d/%Y %H:%M:%S',     # '10/25/2006 14:30:59'
    '%m/%d/%Y %H:%M:%S.%f',  # '10/25/2006 14:30:59.000200'
    '%m/%d/%Y %H:%M',        # '10/25/2006 14:30'
    '%m/%d/%y %H:%M:%S',     # '10/25/06 14:30:59'
    '%m/%d/%y %H:%M:%S.%f',  # '10/25/06 14:30:59.000200'
    '%m/%d/%y %H:%M',        # '10/25/06 14:30'
]

# First day of week, to be used on calendars.
# 0 means Sunday, 1 means Monday...
FIRST_DAY_OF_WEEK = 0

# Decimal separator symbol
DECIMAL_SEPARATOR = '.'
# Boolean that sets whether to add thousand separator when formatting numbers USE_THOUSAND_SEPARATOR = False # Number of digits that will be together, when splitting them by # THOUSAND_SEPARATOR. 0 means no grouping, 3 means splitting by thousands... NUMBER_GROUPING = 0 # Thousand separator symbol THOUSAND_SEPARATOR = ',' # The tablespaces to use for each model when not specified otherwise. DEFAULT_TABLESPACE = '' DEFAULT_INDEX_TABLESPACE = '' # Default X-Frame-Options header value X_FRAME_OPTIONS = 'DENY' USE_X_FORWARDED_HOST = False USE_X_FORWARDED_PORT = False # The Python dotted path to the WSGI application that Django's internal server # (runserver) will use. If `None`, the return value of # 'django.core.wsgi.get_wsgi_application' is used, thus preserving the same # behavior as previous versions of Django. Otherwise this should point to an # actual WSGI application object. WSGI_APPLICATION = None # If your Django app is behind a proxy that sets a header to specify secure # connections, AND that proxy ensures that user-submitted headers with the # same name are ignored (so that people can't spoof it), set this value to # a tuple of (header_name, header_value). For any requests that come in with # that header/value, request.is_secure() will return True. # WARNING! Only set this if you fully understand what you're doing. Otherwise, # you may be opening yourself up to a security risk. SECURE_PROXY_SSL_HEADER = None ############## # MIDDLEWARE # ############## # List of middleware to use. Order is important; in the request phase, these # middleware will be applied in the order given, and in the response # phase the middleware will be applied in reverse order. MIDDLEWARE = [] ############ # SESSIONS # ############ # Cache to store session data if using the cache session backend. SESSION_CACHE_ALIAS = 'default' # Cookie name. This can be whatever you want. SESSION_COOKIE_NAME = 'sessionid' # Age of cookie, in seconds (default: 2 weeks). SESSION_COOKIE_AGE = 60 * 60 * 24 * 7 * 2 # A string like "example.com", or None for standard domain cookie. SESSION_COOKIE_DOMAIN = None # Whether the session cookie should be secure (https:// only). SESSION_COOKIE_SECURE = False # The path of the session cookie. SESSION_COOKIE_PATH = '/' # Whether to use the HttpOnly flag. SESSION_COOKIE_HTTPONLY = True # Whether to set the flag restricting cookie leaks on cross-site requests. # This can be 'Lax', 'Strict', or None to disable the flag. SESSION_COOKIE_SAMESITE = 'Lax' # Whether to save the session data on every request. SESSION_SAVE_EVERY_REQUEST = False # Whether a user's session cookie expires when the Web browser is closed. SESSION_EXPIRE_AT_BROWSER_CLOSE = False # The module to store session data SESSION_ENGINE = 'django.contrib.sessions.backends.db' # Directory to store session files if using the file session module. If None, # the backend will use a sensible default. SESSION_FILE_PATH = None # class to serialize session data SESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer' ######### # CACHE # ######### # The cache backends to use. 
CACHES = { 'default': { 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', } } CACHE_MIDDLEWARE_KEY_PREFIX = '' CACHE_MIDDLEWARE_SECONDS = 600 CACHE_MIDDLEWARE_ALIAS = 'default' ################## # AUTHENTICATION # ################## AUTH_USER_MODEL = 'auth.User' AUTHENTICATION_BACKENDS = ['django.contrib.auth.backends.ModelBackend'] LOGIN_URL = '/accounts/login/' LOGIN_REDIRECT_URL = '/accounts/profile/' LOGOUT_REDIRECT_URL = None # The number of days a password reset link is valid for PASSWORD_RESET_TIMEOUT_DAYS = 3 # The number of seconds a password reset link is valid for (default: 3 days). PASSWORD_RESET_TIMEOUT = 60 * 60 * 24 * 3 # the first hasher in this list is the preferred algorithm. any # password using different algorithms will be converted automatically # upon login PASSWORD_HASHERS = [ 'django.contrib.auth.hashers.PBKDF2PasswordHasher', 'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher', 'django.contrib.auth.hashers.Argon2PasswordHasher', 'django.contrib.auth.hashers.BCryptSHA256PasswordHasher', ] AUTH_PASSWORD_VALIDATORS = [] ########### # SIGNING # ########### SIGNING_BACKEND = 'django.core.signing.TimestampSigner' ######## # CSRF # ######## # Dotted path to callable to be used as view when a request is # rejected by the CSRF middleware. CSRF_FAILURE_VIEW = 'django.views.csrf.csrf_failure' # Settings for CSRF cookie. CSRF_COOKIE_NAME = 'csrftoken' CSRF_COOKIE_AGE = 60 * 60 * 24 * 7 * 52 CSRF_COOKIE_DOMAIN = None CSRF_COOKIE_PATH = '/' CSRF_COOKIE_SECURE = False CSRF_COOKIE_HTTPONLY = False CSRF_COOKIE_SAMESITE = 'Lax' CSRF_HEADER_NAME = 'HTTP_X_CSRFTOKEN' CSRF_TRUSTED_ORIGINS = [] CSRF_USE_SESSIONS = False ############ # MESSAGES # ############ # Class to use as messages backend MESSAGE_STORAGE = 'django.contrib.messages.storage.fallback.FallbackStorage' # Default values of MESSAGE_LEVEL and MESSAGE_TAGS are defined within # django.contrib.messages to avoid imports in this settings file. ########### # LOGGING # ########### # The callable to use to configure logging LOGGING_CONFIG = 'logging.config.dictConfig' # Custom logging configuration. LOGGING = {} # Default exception reporter class used in case none has been # specifically assigned to the HttpRequest instance. DEFAULT_EXCEPTION_REPORTER = 'django.views.debug.ExceptionReporter' # Default exception reporter filter class used in case none has been # specifically assigned to the HttpRequest instance. DEFAULT_EXCEPTION_REPORTER_FILTER = 'django.views.debug.SafeExceptionReporterFilter' ########### # TESTING # ########### # The name of the class to use to run the test suite TEST_RUNNER = 'django.test.runner.DiscoverRunner' # Apps that don't need to be serialized at test database creation time # (only apps with migrations are to start with) TEST_NON_SERIALIZED_APPS = [] ############ # FIXTURES # ############ # The list of directories to search for fixtures FIXTURE_DIRS = [] ############### # STATICFILES # ############### # A list of locations of additional static files STATICFILES_DIRS = [] # The default file storage backend used during the build process STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.StaticFilesStorage' # List of finder classes that know how to find static files in # various locations. STATICFILES_FINDERS = [ 'django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder', # 'django.contrib.staticfiles.finders.DefaultStorageFinder', ] ############## # MIGRATIONS # ############## # Migration module overrides for apps, by app label. 
MIGRATION_MODULES = {}

#################
# SYSTEM CHECKS #
#################

# List of all issues generated by system checks that should be silenced. Light
# issues like warnings, infos or debugs will not generate a message. Silencing
# serious issues like errors and criticals does not result in hiding the
# message, but Django will not stop you from, e.g., running the server.
SILENCED_SYSTEM_CHECKS = []

#######################
# SECURITY MIDDLEWARE #
#######################
SECURE_BROWSER_XSS_FILTER = False
SECURE_CONTENT_TYPE_NOSNIFF = True
SECURE_HSTS_INCLUDE_SUBDOMAINS = False
SECURE_HSTS_PRELOAD = False
SECURE_HSTS_SECONDS = 0
SECURE_REDIRECT_EXEMPT = []
SECURE_REFERRER_POLICY = 'same-origin'
SECURE_SSL_HOST = None
SECURE_SSL_REDIRECT = False
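
# Illustrative sketch (not part of Django): a project settings module
# overrides these defaults by plain assignment. The 'mysite' layout below is
# hypothetical.
#
#     # mysite/settings.py
#     from pathlib import Path
#
#     BASE_DIR = Path(__file__).resolve().parent.parent
#     DEBUG = True
#     ALLOWED_HOSTS = ['localhost', '127.0.0.1']
#     TIME_ZONE = 'UTC'
#     USE_TZ = True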
"Functions that help with dynamically creating decorators for views." from functools import partial, update_wrapper, wraps class classonlymethod(classmethod): def __get__(self, instance, cls=None): if instance is not None: raise AttributeError("This method is available only on the class, not on instances.") return super().__get__(instance, cls) def _update_method_wrapper(_wrapper, decorator): # _multi_decorate()'s bound_method isn't available in this scope. Cheat by # using it on a dummy function. @decorator def dummy(*args, **kwargs): pass update_wrapper(_wrapper, dummy) def _multi_decorate(decorators, method): """ Decorate `method` with one or more function decorators. `decorators` can be a single decorator or an iterable of decorators. """ if hasattr(decorators, '__iter__'): # Apply a list/tuple of decorators if 'decorators' is one. Decorator # functions are applied so that the call order is the same as the # order in which they appear in the iterable. decorators = decorators[::-1] else: decorators = [decorators] def _wrapper(self, *args, **kwargs): # bound_method has the signature that 'decorator' expects i.e. no # 'self' argument, but it's a closure over self so it can call # 'func'. Also, wrap method.__get__() in a function because new # attributes can't be set on bound method objects, only on functions. bound_method = partial(method.__get__(self, type(self))) for dec in decorators: bound_method = dec(bound_method) return bound_method(*args, **kwargs) # Copy any attributes that a decorator adds to the function it decorates. for dec in decorators: _update_method_wrapper(_wrapper, dec) # Preserve any existing attributes of 'method', including the name. update_wrapper(_wrapper, method) return _wrapper def method_decorator(decorator, name=''): """ Convert a function decorator into a method decorator """ # 'obj' can be a class or a function. If 'obj' is a function at the time it # is passed to _dec, it will eventually be a method of the class it is # defined on. If 'obj' is a class, the 'name' is required to be the name # of the method that will be decorated. def _dec(obj): if not isinstance(obj, type): return _multi_decorate(decorator, obj) if not (name and hasattr(obj, name)): raise ValueError( "The keyword argument `name` must be the name of a method " "of the decorated class: %s. Got '%s' instead." % (obj, name) ) method = getattr(obj, name) if not callable(method): raise TypeError( "Cannot decorate '%s' as it isn't a callable attribute of " "%s (%s)." % (name, obj, method) ) _wrapper = _multi_decorate(decorator, method) setattr(obj, name, _wrapper) return obj # Don't worry about making _dec look similar to a list/tuple as it's rather # meaningless. if not hasattr(decorator, '__iter__'): update_wrapper(_dec, decorator) # Change the name to aid debugging. obj = decorator if hasattr(decorator, '__name__') else decorator.__class__ _dec.__name__ = 'method_decorator(%s)' % obj.__name__ return _dec def decorator_from_middleware_with_args(middleware_class): """ Like decorator_from_middleware, but return a function that accepts the arguments to be passed to the middleware_class. Use like:: cache_page = decorator_from_middleware_with_args(CacheMiddleware) # ... @cache_page(3600) def my_view(request): # ... """ return make_middleware_decorator(middleware_class) def decorator_from_middleware(middleware_class): """ Given a middleware class (not an instance), return a view decorator. This lets you use middleware functionality on a per-view basis. The middleware is created with no params passed. 
""" return make_middleware_decorator(middleware_class)() def make_middleware_decorator(middleware_class): def _make_decorator(*m_args, **m_kwargs): def _decorator(view_func): middleware = middleware_class(view_func, *m_args, **m_kwargs) @wraps(view_func) def _wrapped_view(request, *args, **kwargs): if hasattr(middleware, 'process_request'): result = middleware.process_request(request) if result is not None: return result if hasattr(middleware, 'process_view'): result = middleware.process_view(request, view_func, args, kwargs) if result is not None: return result try: response = view_func(request, *args, **kwargs) except Exception as e: if hasattr(middleware, 'process_exception'): result = middleware.process_exception(request, e) if result is not None: return result raise if hasattr(response, 'render') and callable(response.render): if hasattr(middleware, 'process_template_response'): response = middleware.process_template_response(request, response) # Defer running of process_response until after the template # has been rendered: if hasattr(middleware, 'process_response'): def callback(response): return middleware.process_response(request, response) response.add_post_render_callback(callback) else: if hasattr(middleware, 'process_response'): return middleware.process_response(request, response) return response return _wrapped_view return _decorator return _make_decorator def sync_and_async_middleware(func): """ Mark a middleware factory as returning a hybrid middleware supporting both types of request. """ func.sync_capable = True func.async_capable = True return func def sync_only_middleware(func): """ Mark a middleware factory as returning a sync middleware. This is the default. """ func.sync_capable = True func.async_capable = False return func def async_only_middleware(func): """Mark a middleware factory as returning an async middleware.""" func.sync_capable = False func.async_capable = True return func
# These classes override date and datetime to ensure that strftime('%Y') # returns four digits (with leading zeros) on years < 1000. # https://bugs.python.org/issue13305 # # Based on code submitted to comp.lang.python by Andrew Dalke # # >>> datetime_safe.date(10, 8, 2).strftime("%Y/%m/%d was a %A") # '0010/08/02 was a Monday' import time as ttime from datetime import ( date as real_date, datetime as real_datetime, time as real_time, ) from django.utils.regex_helper import _lazy_re_compile class date(real_date): def strftime(self, fmt): return strftime(self, fmt) class datetime(real_datetime): def strftime(self, fmt): return strftime(self, fmt) @classmethod def combine(cls, date, time): return cls(date.year, date.month, date.day, time.hour, time.minute, time.second, time.microsecond, time.tzinfo) def date(self): return date(self.year, self.month, self.day) class time(real_time): pass def new_date(d): "Generate a safe date from a datetime.date object." return date(d.year, d.month, d.day) def new_datetime(d): """ Generate a safe datetime from a datetime.date or datetime.datetime object. """ kw = [d.year, d.month, d.day] if isinstance(d, real_datetime): kw.extend([d.hour, d.minute, d.second, d.microsecond, d.tzinfo]) return datetime(*kw) # This library does not support strftime's "%s" or "%y" format strings. # Allowed if there's an even number of "%"s because they are escaped. _illegal_formatting = _lazy_re_compile(r"((^|[^%])(%%)*%[sy])") def _findall(text, substr): # Also finds overlaps sites = [] i = 0 while True: i = text.find(substr, i) if i == -1: break sites.append(i) i += 1 return sites def strftime(dt, fmt): if dt.year >= 1000: return super(type(dt), dt).strftime(fmt) illegal_formatting = _illegal_formatting.search(fmt) if illegal_formatting: raise TypeError("strftime of dates before 1000 does not handle " + illegal_formatting.group(0)) year = dt.year # For every non-leap year century, advance by # 6 years to get into the 28-year repeat cycle delta = 2000 - year off = 6 * (delta // 100 + delta // 400) year = year + off # Move to around the year 2000 year = year + ((2000 - year) // 28) * 28 timetuple = dt.timetuple() s1 = ttime.strftime(fmt, (year,) + timetuple[1:]) sites1 = _findall(s1, str(year)) s2 = ttime.strftime(fmt, (year + 28,) + timetuple[1:]) sites2 = _findall(s2, str(year + 28)) sites = [] for site in sites1: if site in sites2: sites.append(site) s = s1 syear = "%04d" % dt.year for site in sites: s = s[:site] + syear + s[site + 4:] return s
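
# Illustrative sketch (not part of Django): the helpers above accept standard
# library objects directly.
#
# >>> from datetime import date as std_date
# >>> new_date(std_date(5, 1, 1)).strftime('%Y-%m-%d')
# '0005-01-01'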
import functools
import itertools
import logging
import os
import signal
import subprocess
import sys
import threading
import time
import traceback
import weakref
from collections import defaultdict
from pathlib import Path
from types import ModuleType
from zipimport import zipimporter

from django.apps import apps
from django.core.signals import request_finished
from django.dispatch import Signal
from django.utils.functional import cached_property
from django.utils.version import get_version_tuple

autoreload_started = Signal()
file_changed = Signal()

DJANGO_AUTORELOAD_ENV = 'RUN_MAIN'

logger = logging.getLogger('django.utils.autoreload')

# If an error is raised while importing a file, it's not placed in sys.modules.
# This means that any future modifications aren't caught. Keep a list of these
# file paths to allow watching them in the future.
_error_files = []
_exception = None

try:
    import termios
except ImportError:
    termios = None

try:
    import pywatchman
except ImportError:
    pywatchman = None


def check_errors(fn):
    @functools.wraps(fn)
    def wrapper(*args, **kwargs):
        global _exception
        try:
            fn(*args, **kwargs)
        except Exception:
            _exception = sys.exc_info()

            et, ev, tb = _exception

            if getattr(ev, 'filename', None) is None:
                # get the filename from the last item in the stack
                filename = traceback.extract_tb(tb)[-1][0]
            else:
                filename = ev.filename

            if filename not in _error_files:
                _error_files.append(filename)

            raise

    return wrapper


def raise_last_exception():
    global _exception
    if _exception is not None:
        raise _exception[1]


def ensure_echo_on():
    """
    Ensure that echo mode is enabled. Some tools such as PDB disable
    it which causes usability issues after reload.
    """
    if not termios or not sys.stdin.isatty():
        return
    attr_list = termios.tcgetattr(sys.stdin)
    if not attr_list[3] & termios.ECHO:
        attr_list[3] |= termios.ECHO
        if hasattr(signal, 'SIGTTOU'):
            old_handler = signal.signal(signal.SIGTTOU, signal.SIG_IGN)
        else:
            old_handler = None
        termios.tcsetattr(sys.stdin, termios.TCSANOW, attr_list)
        if old_handler is not None:
            signal.signal(signal.SIGTTOU, old_handler)


def iter_all_python_module_files():
    # This is a hot path during reloading. Create a stable sorted list of
    # modules based on the module name and pass it to iter_modules_and_files().
    # This ensures cached results are returned in the usual case that modules
    # aren't loaded on the fly.
    keys = sorted(sys.modules)
    modules = tuple(m for m in map(sys.modules.__getitem__, keys) if not isinstance(m, weakref.ProxyTypes))
    return iter_modules_and_files(modules, frozenset(_error_files))


@functools.lru_cache(maxsize=1)
def iter_modules_and_files(modules, extra_files):
    """Iterate through all modules needed to be watched."""
    sys_file_paths = []
    for module in modules:
        # During debugging (with PyDev) the 'typing.io' and 'typing.re' objects
        # are added to sys.modules, however they are types not modules and so
        # cause issues here.
        if not isinstance(module, ModuleType):
            continue
        if module.__name__ == '__main__':
            # __main__ (usually manage.py) doesn't always have a __spec__ set.
            # Handle this by falling back to using __file__, resolved below.
            # See https://docs.python.org/reference/import.html#main-spec
            # __file__ may not exist, e.g. when running the ipdb debugger.
            if hasattr(module, '__file__'):
                sys_file_paths.append(module.__file__)
            continue
        if getattr(module, '__spec__', None) is None:
            continue
        spec = module.__spec__
        # Modules could be loaded from places without a concrete location. If
        # this is the case, skip them.
        if spec.has_location:
            origin = spec.loader.archive if isinstance(spec.loader, zipimporter) else spec.origin
            sys_file_paths.append(origin)

    results = set()
    for filename in itertools.chain(sys_file_paths, extra_files):
        if not filename:
            continue
        path = Path(filename)
        try:
            resolved_path = path.resolve(strict=True).absolute()
        except FileNotFoundError:
            # The module could have been removed, don't fail loudly if this
            # is the case.
            continue
        except ValueError as e:
            # Network filesystems may return null bytes in file paths.
            logger.debug('"%s" raised when resolving path: "%s"' % (str(e), path))
            continue
        results.add(resolved_path)
    return frozenset(results)


@functools.lru_cache(maxsize=1)
def common_roots(paths):
    """
    Return a tuple of common roots that are shared between the given paths.
    File system watchers operate on directories and aren't cheap to create.
    Try to find the minimum set of directories to watch that encompass all of
    the files that need to be watched.
    """
    # Inspired by Werkzeug:
    # https://github.com/pallets/werkzeug/blob/7477be2853df70a022d9613e765581b9411c3c39/werkzeug/_reloader.py
    # Create a sorted list of the path components, longest first.
    path_parts = sorted([x.parts for x in paths], key=len, reverse=True)
    tree = {}
    for chunks in path_parts:
        node = tree
        # Add each part of the path to the tree.
        for chunk in chunks:
            node = node.setdefault(chunk, {})
        # Clear the last leaf in the tree.
        node.clear()

    # Turn the tree into a list of Path instances.
    def _walk(node, path):
        for prefix, child in node.items():
            yield from _walk(child, path + (prefix,))
        if not node:
            yield Path(*path)

    return tuple(_walk(tree, ()))


def sys_path_directories():
    """
    Yield absolute directories from sys.path, ignoring entries that don't
    exist.
    """
    for path in sys.path:
        path = Path(path)
        try:
            resolved_path = path.resolve(strict=True).absolute()
        except FileNotFoundError:
            continue
        # If the path is a file (like a zip file), watch the parent directory.
        if resolved_path.is_file():
            yield resolved_path.parent
        else:
            yield resolved_path


def get_child_arguments():
    """
    Return the arguments used to re-run the current Python program in a child
    process, preserving any -W warning options and handling the case where
    the server was started with `python -m django`.
    """
    import django.__main__

    args = [sys.executable] + ['-W%s' % o for o in sys.warnoptions]
    if sys.argv[0] == django.__main__.__file__:
        # The server was started with `python -m django runserver`.
        args += ['-m', 'django']
        args += sys.argv[1:]
    else:
        args += sys.argv
    return args


def trigger_reload(filename):
    logger.info('%s changed, reloading.', filename)
    sys.exit(3)


def restart_with_reloader():
    new_environ = {**os.environ, DJANGO_AUTORELOAD_ENV: 'true'}
    args = get_child_arguments()
    while True:
        p = subprocess.run(args, env=new_environ, close_fds=False)
        if p.returncode != 3:
            return p.returncode


class BaseReloader:
    def __init__(self):
        self.extra_files = set()
        self.directory_globs = defaultdict(set)
        self._stop_condition = threading.Event()

    def watch_dir(self, path, glob):
        path = Path(path)
        try:
            path = path.absolute()
        except FileNotFoundError:
            logger.debug(
                'Unable to watch directory %s as it cannot be resolved.',
                path,
                exc_info=True,
            )
            return
        logger.debug('Watching dir %s with glob %s.', path, glob)
        self.directory_globs[path].add(glob)

    def watched_files(self, include_globs=True):
        """
        Yield all files that need to be watched, including module files and
        files within globs.
""" yield from iter_all_python_module_files() yield from self.extra_files if include_globs: for directory, patterns in self.directory_globs.items(): for pattern in patterns: yield from directory.glob(pattern) def wait_for_apps_ready(self, app_reg, django_main_thread): """ Wait until Django reports that the apps have been loaded. If the given thread has terminated before the apps are ready, then a SyntaxError or other non-recoverable error has been raised. In that case, stop waiting for the apps_ready event and continue processing. Return True if the thread is alive and the ready event has been triggered, or False if the thread is terminated while waiting for the event. """ while django_main_thread.is_alive(): if app_reg.ready_event.wait(timeout=0.1): return True else: logger.debug('Main Django thread has terminated before apps are ready.') return False def run(self, django_main_thread): logger.debug('Waiting for apps ready_event.') self.wait_for_apps_ready(apps, django_main_thread) from django.urls import get_resolver # Prevent a race condition where URL modules aren't loaded when the # reloader starts by accessing the urlconf_module property. try: get_resolver().urlconf_module except Exception: # Loading the urlconf can result in errors during development. # If this occurs then swallow the error and continue. pass logger.debug('Apps ready_event triggered. Sending autoreload_started signal.') autoreload_started.send(sender=self) self.run_loop() def run_loop(self): ticker = self.tick() while not self.should_stop: try: next(ticker) except StopIteration: break self.stop() def tick(self): """ This generator is called in a loop from run_loop. It's important that the method takes care of pausing or otherwise waiting for a period of time. This split between run_loop() and tick() is to improve the testability of the reloader implementations by decoupling the work they do from the loop. """ raise NotImplementedError('subclasses must implement tick().') @classmethod def check_availability(cls): raise NotImplementedError('subclasses must implement check_availability().') def notify_file_changed(self, path): results = file_changed.send(sender=self, file_path=path) logger.debug('%s notified as changed. Signal results: %s.', path, results) if not any(res[1] for res in results): trigger_reload(path) # These are primarily used for testing. @property def should_stop(self): return self._stop_condition.is_set() def stop(self): self._stop_condition.set() class StatReloader(BaseReloader): SLEEP_TIME = 1 # Check for changes once per second. def tick(self): mtimes = {} while True: for filepath, mtime in self.snapshot_files(): old_time = mtimes.get(filepath) mtimes[filepath] = mtime if old_time is None: logger.debug('File %s first seen with mtime %s', filepath, mtime) continue elif mtime > old_time: logger.debug('File %s previous mtime: %s, current mtime: %s', filepath, old_time, mtime) self.notify_file_changed(filepath) time.sleep(self.SLEEP_TIME) yield def snapshot_files(self): # watched_files may produce duplicate paths if globs overlap. seen_files = set() for file in self.watched_files(): if file in seen_files: continue try: mtime = file.stat().st_mtime except OSError: # This is thrown when the file does not exist. 
                continue
            seen_files.add(file)
            yield file, mtime

    @classmethod
    def check_availability(cls):
        return True


class WatchmanUnavailable(RuntimeError):
    pass


class WatchmanReloader(BaseReloader):
    def __init__(self):
        self.roots = defaultdict(set)
        self.processed_request = threading.Event()
        self.client_timeout = int(os.environ.get('DJANGO_WATCHMAN_TIMEOUT', 5))
        super().__init__()

    @cached_property
    def client(self):
        return pywatchman.client(timeout=self.client_timeout)

    def _watch_root(self, root):
        # In practice this shouldn't occur; however, it's possible that a
        # directory that doesn't exist yet is being watched. If it's outside
        # of sys.path then this will end up as a new root. How to handle this
        # isn't clear: not adding the root will likely break when subscribing
        # to the changes. However, as this is currently an internal API, no
        # files will be being watched outside of sys.path. Fixing this by
        # checking inside watch_glob() and watch_dir() is expensive; instead
        # this could fall back to the StatReloader if this case is detected.
        # For now, watching its parent, if possible, is sufficient.
        if not root.exists():
            if not root.parent.exists():
                logger.warning('Unable to watch root dir %s as neither it nor its parent exists.', root)
                return
            root = root.parent
        result = self.client.query('watch-project', str(root.absolute()))
        if 'warning' in result:
            logger.warning('Watchman warning: %s', result['warning'])
        logger.debug('Watchman watch-project result: %s', result)
        return result['watch'], result.get('relative_path')

    @functools.lru_cache()
    def _get_clock(self, root):
        return self.client.query('clock', root)['clock']

    def _subscribe(self, directory, name, expression):
        root, rel_path = self._watch_root(directory)
        query = {
            'expression': expression,
            'fields': ['name'],
            'since': self._get_clock(root),
            'dedup_results': True,
        }
        if rel_path:
            query['relative_root'] = rel_path
        logger.debug('Issuing watchman subscription %s, for root %s. Query: %s', name, root, query)
        self.client.query('subscribe', root, name, query)

    def _subscribe_dir(self, directory, filenames):
        if not directory.exists():
            if not directory.parent.exists():
                logger.warning('Unable to watch directory %s as neither it nor its parent exists.', directory)
                return
            prefix = 'files-parent-%s' % directory.name
            filenames = ['%s/%s' % (directory.name, filename) for filename in filenames]
            directory = directory.parent
            expression = ['name', filenames, 'wholename']
        else:
            prefix = 'files'
            expression = ['name', filenames]
        self._subscribe(directory, '%s:%s' % (prefix, directory), expression)

    def _watch_glob(self, directory, patterns):
        """
        Watch a directory with a specific glob. If the directory doesn't yet
        exist, attempt to watch the parent directory and amend the patterns
        to include this. It's important this method isn't called more than
        once per directory when updating all subscriptions. Subsequent calls
        will overwrite the named subscription, so it must include all
        possible glob expressions.
        """
        prefix = 'glob'
        if not directory.exists():
            if not directory.parent.exists():
                logger.warning('Unable to watch directory %s as neither it nor its parent exists.', directory)
                return
            prefix = 'glob-parent-%s' % directory.name
            patterns = ['%s/%s' % (directory.name, pattern) for pattern in patterns]
            directory = directory.parent
        expression = ['anyof']
        for pattern in patterns:
            expression.append(['match', pattern, 'wholename'])
        self._subscribe(directory, '%s:%s' % (prefix, directory), expression)

    def watched_roots(self, watched_files):
        extra_directories = self.directory_globs.keys()
        watched_file_dirs = [f.parent for f in watched_files]
        sys_paths = list(sys_path_directories())
        return frozenset((*extra_directories, *watched_file_dirs, *sys_paths))

    def _update_watches(self):
        watched_files = list(self.watched_files(include_globs=False))
        found_roots = common_roots(self.watched_roots(watched_files))
        logger.debug('Watching %s files', len(watched_files))
        logger.debug('Found common roots: %s', found_roots)
        # Set up initial roots for performance, shortest roots first.
        for root in sorted(found_roots):
            self._watch_root(root)
        for directory, patterns in self.directory_globs.items():
            self._watch_glob(directory, patterns)
        # Group sorted watched_files by their parent directory.
        sorted_files = sorted(watched_files, key=lambda p: p.parent)
        for directory, group in itertools.groupby(sorted_files, key=lambda p: p.parent):
            # These paths need to be relative to the parent directory.
            self._subscribe_dir(directory, [str(p.relative_to(directory)) for p in group])

    def update_watches(self):
        try:
            self._update_watches()
        except Exception as ex:
            # If the service is still available, raise the original exception.
            if self.check_server_status(ex):
                raise

    def _check_subscription(self, sub):
        subscription = self.client.getSubscription(sub)
        if not subscription:
            return
        logger.debug('Watchman subscription %s has results.', sub)
        for result in subscription:
            # When using watch-project, it's not simple to get the relative
            # directory without storing some specific state. Store the full
            # path to the directory in the subscription name, prefixed by its
            # type (glob, files).
            root_directory = Path(result['subscription'].split(':', 1)[1])
            logger.debug('Found root directory %s', root_directory)
            for file in result.get('files', []):
                self.notify_file_changed(root_directory / file)

    def request_processed(self, **kwargs):
        logger.debug('Request processed. Setting update_watches event.')
        self.processed_request.set()

    def tick(self):
        request_finished.connect(self.request_processed)
        self.update_watches()
        while True:
            if self.processed_request.is_set():
                self.update_watches()
                self.processed_request.clear()
            try:
                self.client.receive()
            except pywatchman.SocketTimeout:
                pass
            except pywatchman.WatchmanError as ex:
                logger.debug('Watchman error: %s, checking server status.', ex)
                self.check_server_status(ex)
            else:
                for sub in list(self.client.subs.keys()):
                    self._check_subscription(sub)
            yield

    def stop(self):
        self.client.close()
        super().stop()

    def check_server_status(self, inner_ex=None):
        """Return True if the server is available."""
        try:
            self.client.query('version')
        except Exception:
            raise WatchmanUnavailable(str(inner_ex)) from inner_ex
        return True

    @classmethod
    def check_availability(cls):
        if not pywatchman:
            raise WatchmanUnavailable('pywatchman not installed.')
        client = pywatchman.client(timeout=0.1)
        try:
            result = client.capabilityCheck()
        except Exception:
            # The service is down?
raise WatchmanUnavailable('Cannot connect to the watchman service.') version = get_version_tuple(result['version']) # Watchman 4.9 includes multiple improvements to watching project # directories as well as case insensitive filesystems. logger.debug('Watchman version %s', version) if version < (4, 9): raise WatchmanUnavailable('Watchman 4.9 or later is required.') def get_reloader(): """Return the most suitable reloader for this environment.""" try: WatchmanReloader.check_availability() except WatchmanUnavailable: return StatReloader() return WatchmanReloader() def start_django(reloader, main_func, *args, **kwargs): ensure_echo_on() main_func = check_errors(main_func) django_main_thread = threading.Thread(target=main_func, args=args, kwargs=kwargs, name='django-main-thread') django_main_thread.setDaemon(True) django_main_thread.start() while not reloader.should_stop: try: reloader.run(django_main_thread) except WatchmanUnavailable as ex: # It's possible that the watchman service shuts down or otherwise # becomes unavailable. In that case, use the StatReloader. reloader = StatReloader() logger.error('Error connecting to Watchman: %s', ex) logger.info('Watching for file changes with %s', reloader.__class__.__name__) def run_with_reloader(main_func, *args, **kwargs): signal.signal(signal.SIGTERM, lambda *args: sys.exit(0)) try: if os.environ.get(DJANGO_AUTORELOAD_ENV) == 'true': reloader = get_reloader() logger.info('Watching for file changes with %s', reloader.__class__.__name__) start_django(reloader, main_func, *args, **kwargs) else: exit_code = restart_with_reloader() sys.exit(exit_code) except KeyboardInterrupt: pass
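# Illustrative usage (not part of Django): a minimal sketch of how a
# management command such as runserver drives this module. `inner_run` is a
# hypothetical stand-in for the long-running callable being supervised. The
# parent process loops in restart_with_reloader(); the child process (where
# DJANGO_AUTORELOAD_ENV is set) runs `inner_run` in a daemon thread while the
# reloader watches files, exiting with code 3 to request a restart.
#
#     from django.utils import autoreload
#
#     def inner_run():
#         ...  # import project modules, start serving, block forever
#
#     if __name__ == '__main__':
#         autoreload.run_with_reloader(inner_run)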
bd9cb4eee815ded5304cfa8360bc89bae34ac4f9086de776a4d711653feb5146
from decimal import Decimal from django.conf import settings from django.utils.safestring import mark_safe def format(number, decimal_sep, decimal_pos=None, grouping=0, thousand_sep='', force_grouping=False, use_l10n=None): """ Get a number (as a number or string), and return it as a string, using formats defined as arguments: * decimal_sep: Decimal separator symbol (for example ".") * decimal_pos: Number of decimal positions * grouping: Number of digits in every group limited by thousand separator. For non-uniform digit grouping, it can be a sequence with the number of digit group sizes following the format used by the Python locale module in locale.localeconv() LC_NUMERIC grouping (e.g. (3, 2, 0)). * thousand_sep: Thousand separator symbol (for example ",") """ use_grouping = (use_l10n or (use_l10n is None and settings.USE_L10N)) and settings.USE_THOUSAND_SEPARATOR use_grouping = use_grouping or force_grouping use_grouping = use_grouping and grouping != 0 # Make the common case fast if isinstance(number, int) and not use_grouping and not decimal_pos: return mark_safe(number) # sign sign = '' # Treat potentially very large/small floats as Decimals. if isinstance(number, float) and 'e' in str(number).lower(): number = Decimal(str(number)) if isinstance(number, Decimal): if decimal_pos is not None: # If the provided number is too small to affect any of the visible # decimal places, consider it equal to '0'. cutoff = Decimal('0.' + '1'.rjust(decimal_pos, '0')) if abs(number) < cutoff: number = Decimal('0') # Format values with more than 200 digits (an arbitrary cutoff) using # scientific notation to avoid high memory usage in {:f}'.format(). _, digits, exponent = number.as_tuple() if abs(exponent) + len(digits) > 200: number = '{:e}'.format(number) coefficient, exponent = number.split('e') # Format the coefficient. coefficient = format( coefficient, decimal_sep, decimal_pos, grouping, thousand_sep, force_grouping, use_l10n, ) return '{}e{}'.format(coefficient, exponent) else: str_number = '{:f}'.format(number) else: str_number = str(number) if str_number[0] == '-': sign = '-' str_number = str_number[1:] # decimal part if '.' in str_number: int_part, dec_part = str_number.split('.') if decimal_pos is not None: dec_part = dec_part[:decimal_pos] else: int_part, dec_part = str_number, '' if decimal_pos is not None: dec_part = dec_part + ('0' * (decimal_pos - len(dec_part))) dec_part = dec_part and decimal_sep + dec_part # grouping if use_grouping: try: # if grouping is a sequence intervals = list(grouping) except TypeError: # grouping is a single value intervals = [grouping, 0] active_interval = intervals.pop(0) int_part_gd = '' cnt = 0 for digit in int_part[::-1]: if cnt and cnt == active_interval: if intervals: active_interval = intervals.pop(0) or active_interval int_part_gd += thousand_sep[::-1] cnt = 0 int_part_gd += digit cnt += 1 int_part = int_part_gd[::-1] return sign + int_part + dec_part
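# Illustrative examples (not part of Django), shown as doctest-style
# comments. Arguments are passed explicitly with use_l10n=False and
# force_grouping=True so that no settings access is needed; the expected
# outputs follow from the code above.
#
#     >>> format(1234567.891, ',', decimal_pos=2, grouping=3,
#     ...        thousand_sep='.', force_grouping=True, use_l10n=False)
#     '1.234.567,89'
#     >>> format(Decimal('1234.5'), ',', decimal_pos=2, use_l10n=False)
#     '1234,50'
#     >>> # Non-uniform grouping, as produced by locale.localeconv():
#     >>> format(123456789, ',', grouping=(3, 2, 0), thousand_sep=' ',
#     ...        force_grouping=True, use_l10n=False)
#     '12 34 56 789'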
620a45745f9a0f927dd8e7db9ba41b6f6069846b0b24ed57ba81a871a0f28c2b
import asyncio import inspect import warnings from asgiref.sync import sync_to_async class RemovedInNextVersionWarning(DeprecationWarning): pass class RemovedInDjango40Warning(PendingDeprecationWarning): pass class warn_about_renamed_method: def __init__(self, class_name, old_method_name, new_method_name, deprecation_warning): self.class_name = class_name self.old_method_name = old_method_name self.new_method_name = new_method_name self.deprecation_warning = deprecation_warning def __call__(self, f): def wrapped(*args, **kwargs): warnings.warn( "`%s.%s` is deprecated, use `%s` instead." % (self.class_name, self.old_method_name, self.new_method_name), self.deprecation_warning, 2) return f(*args, **kwargs) return wrapped class RenameMethodsBase(type): """ Handles the deprecation paths when renaming a method. It does the following: 1) Define the new method if missing and complain about it. 2) Define the old method if missing. 3) Complain whenever an old method is called. See #15363 for more details. """ renamed_methods = () def __new__(cls, name, bases, attrs): new_class = super().__new__(cls, name, bases, attrs) for base in inspect.getmro(new_class): class_name = base.__name__ for renamed_method in cls.renamed_methods: old_method_name = renamed_method[0] old_method = base.__dict__.get(old_method_name) new_method_name = renamed_method[1] new_method = base.__dict__.get(new_method_name) deprecation_warning = renamed_method[2] wrapper = warn_about_renamed_method(class_name, *renamed_method) # Define the new method if missing and complain about it if not new_method and old_method: warnings.warn( "`%s.%s` method should be renamed `%s`." % (class_name, old_method_name, new_method_name), deprecation_warning, 2) setattr(base, new_method_name, old_method) setattr(base, old_method_name, wrapper(old_method)) # Define the old method as a wrapped call to the new method. if not old_method and new_method: setattr(base, old_method_name, wrapper(new_method)) return new_class class DeprecationInstanceCheck(type): def __instancecheck__(self, instance): warnings.warn( "`%s` is deprecated, use `%s` instead." % (self.__name__, self.alternative), self.deprecation_warning, 2 ) return super().__instancecheck__(instance) class MiddlewareMixin: sync_capable = True async_capable = True # RemovedInDjango40Warning: when the deprecation ends, replace with: # def __init__(self, get_response): def __init__(self, get_response=None): self._get_response_none_deprecation(get_response) self.get_response = get_response self._async_check() super().__init__() def _async_check(self): """ If get_response is a coroutine function, turns us into async mode so a thread is not consumed during a whole request. """ if asyncio.iscoroutinefunction(self.get_response): # Mark the class as async-capable, but do the actual switch # inside __call__ to avoid swapping out dunder methods self._is_coroutine = asyncio.coroutines._is_coroutine def __call__(self, request): # Exit out to async mode, if needed if asyncio.iscoroutinefunction(self.get_response): return self.__acall__(request) response = None if hasattr(self, 'process_request'): response = self.process_request(request) response = response or self.get_response(request) if hasattr(self, 'process_response'): response = self.process_response(request, response) return response async def __acall__(self, request): """ Async version of __call__ that is swapped in when an async request is running. 
""" response = None if hasattr(self, 'process_request'): response = await sync_to_async(self.process_request)(request) response = response or await self.get_response(request) if hasattr(self, 'process_response'): response = await sync_to_async(self.process_response)(request, response) return response def _get_response_none_deprecation(self, get_response): if get_response is None: warnings.warn( 'Passing None for the middleware get_response argument is ' 'deprecated.', RemovedInDjango40Warning, stacklevel=3, )
838d714ead533ca4acb421b4b03e3bf867cf2f2881d7cc3cc4217b75a9dfadc2
import codecs import datetime import locale import warnings from decimal import Decimal from urllib.parse import quote from django.utils.deprecation import RemovedInDjango40Warning from django.utils.functional import Promise class DjangoUnicodeDecodeError(UnicodeDecodeError): def __init__(self, obj, *args): self.obj = obj super().__init__(*args) def __str__(self): return '%s. You passed in %r (%s)' % (super().__str__(), self.obj, type(self.obj)) def smart_str(s, encoding='utf-8', strings_only=False, errors='strict'): """ Return a string representing 's'. Treat bytestrings using the 'encoding' codec. If strings_only is True, don't convert (some) non-string-like objects. """ if isinstance(s, Promise): # The input is the result of a gettext_lazy() call. return s return force_str(s, encoding, strings_only, errors) _PROTECTED_TYPES = ( type(None), int, float, Decimal, datetime.datetime, datetime.date, datetime.time, ) def is_protected_type(obj): """Determine if the object instance is of a protected type. Objects of protected types are preserved as-is when passed to force_str(strings_only=True). """ return isinstance(obj, _PROTECTED_TYPES) def force_str(s, encoding='utf-8', strings_only=False, errors='strict'): """ Similar to smart_str(), except that lazy instances are resolved to strings, rather than kept as lazy objects. If strings_only is True, don't convert (some) non-string-like objects. """ # Handle the common case first for performance reasons. if issubclass(type(s), str): return s if strings_only and is_protected_type(s): return s try: if isinstance(s, bytes): s = str(s, encoding, errors) else: s = str(s) except UnicodeDecodeError as e: raise DjangoUnicodeDecodeError(s, *e.args) return s def smart_bytes(s, encoding='utf-8', strings_only=False, errors='strict'): """ Return a bytestring version of 's', encoded as specified in 'encoding'. If strings_only is True, don't convert (some) non-string-like objects. """ if isinstance(s, Promise): # The input is the result of a gettext_lazy() call. return s return force_bytes(s, encoding, strings_only, errors) def force_bytes(s, encoding='utf-8', strings_only=False, errors='strict'): """ Similar to smart_bytes, except that lazy instances are resolved to strings, rather than kept as lazy objects. If strings_only is True, don't convert (some) non-string-like objects. """ # Handle the common case first for performance reasons. if isinstance(s, bytes): if encoding == 'utf-8': return s else: return s.decode('utf-8', errors).encode(encoding, errors) if strings_only and is_protected_type(s): return s if isinstance(s, memoryview): return bytes(s) return str(s).encode(encoding, errors) def smart_text(s, encoding='utf-8', strings_only=False, errors='strict'): warnings.warn( 'smart_text() is deprecated in favor of smart_str().', RemovedInDjango40Warning, stacklevel=2, ) return smart_str(s, encoding, strings_only, errors) def force_text(s, encoding='utf-8', strings_only=False, errors='strict'): warnings.warn( 'force_text() is deprecated in favor of force_str().', RemovedInDjango40Warning, stacklevel=2, ) return force_str(s, encoding, strings_only, errors) def iri_to_uri(iri): """ Convert an Internationalized Resource Identifier (IRI) portion to a URI portion that is suitable for inclusion in a URL. This is the algorithm from section 3.1 of RFC 3987, slightly simplified since the input is assumed to be a string rather than an arbitrary byte stream. Take an IRI (string or UTF-8 bytes, e.g. 
    '/I ♥ Django/' or b'/I \xe2\x99\xa5 Django/') and return a string
    containing the encoded result with ASCII chars only (e.g.
    '/I%20%E2%99%A5%20Django/').
    """
    # The list of safe characters here is constructed from the "reserved" and
    # "unreserved" characters specified in sections 2.2 and 2.3 of RFC 3986:
    #     reserved    = gen-delims / sub-delims
    #     gen-delims  = ":" / "/" / "?" / "#" / "[" / "]" / "@"
    #     sub-delims  = "!" / "$" / "&" / "'" / "(" / ")"
    #                   / "*" / "+" / "," / ";" / "="
    #     unreserved  = ALPHA / DIGIT / "-" / "." / "_" / "~"
    # Of the unreserved characters, urllib.parse.quote() already considers
    # all but the ~ safe.
    # The % character is also added to the list of safe characters here, as
    # the end of section 3.1 of RFC 3987 specifically mentions that % must
    # not be converted.
    if iri is None:
        return iri
    elif isinstance(iri, Promise):
        iri = str(iri)
    return quote(iri, safe="/#%[]=:;$&()+,!?*@'~")


# List of byte values that uri_to_iri() decodes from percent encoding.
# First, the unreserved characters from RFC 3986:
_ascii_ranges = [[45, 46, 95, 126], range(65, 91), range(97, 123)]
_hextobyte = {
    (fmt % char).encode(): bytes((char,))
    for ascii_range in _ascii_ranges
    for char in ascii_range
    for fmt in ['%02x', '%02X']
}
# And then everything above 128, because bytes ≥ 128 are part of multibyte
# Unicode characters.
_hexdig = '0123456789ABCDEFabcdef'
_hextobyte.update({
    (a + b).encode(): bytes.fromhex(a + b)
    for a in _hexdig[8:] for b in _hexdig
})


def uri_to_iri(uri):
    """
    Convert a Uniform Resource Identifier (URI) into an Internationalized
    Resource Identifier (IRI).

    This is the algorithm from section 3.2 of RFC 3987, excluding step 4.

    Take a URI in ASCII bytes (e.g. '/I%20%E2%99%A5%20Django/') and return
    a string containing the encoded result (e.g. '/I%20♥%20Django/').
    """
    if uri is None:
        return uri
    uri = force_bytes(uri)
    # Fast selective unquote: First, split on '%' and then starting with the
    # second block, decode the first 2 bytes if they represent a hex code to
    # decode. The rest of the block is the part after '%AB', not containing
    # any '%'. Add that to the output without further processing.
    bits = uri.split(b'%')
    if len(bits) == 1:
        iri = uri
    else:
        parts = [bits[0]]
        append = parts.append
        hextobyte = _hextobyte
        for item in bits[1:]:
            hex = item[:2]
            if hex in hextobyte:
                append(hextobyte[hex])
                append(item[2:])
            else:
                append(b'%')
                append(item)
        iri = b''.join(parts)
    return repercent_broken_unicode(iri).decode()


def escape_uri_path(path):
    """
    Escape the unsafe characters from the path portion of a Uniform Resource
    Identifier (URI).
    """
    # These are the "reserved" and "unreserved" characters specified in
    # sections 2.2 and 2.3 of RFC 2396:
    #     reserved    = ";" | "/" | "?" | ":" | "@" | "&" | "=" | "+" | "$" | ","
    #     unreserved  = alphanum | mark
    #     mark        = "-" | "_" | "." | "!" | "~" | "*" | "'" | "(" | ")"
    # The list of safe characters here is constructed subtracting ";", "=",
    # and "?" according to section 3.3 of RFC 2396.
    # The reason for not subtracting and escaping "/" is that we are escaping
    # the entire path, not a path segment.
    return quote(path, safe="/:@&+$,-_.!~*'()")


def punycode(domain):
    """Return the Punycode of the given domain if it's non-ASCII."""
    return domain.encode('idna').decode('ascii')


def repercent_broken_unicode(path):
    """
    As per section 3.2 of RFC 3987, step three of converting a URI into an
    IRI, repercent-encode any octet produced that is not part of a strictly
    legal UTF-8 octet sequence.
""" while True: try: path.decode() except UnicodeDecodeError as e: # CVE-2019-14235: A recursion shouldn't be used since the exception # handling uses massive amounts of memory repercent = quote(path[e.start:e.end], safe=b"/#%[]=:;$&()+,!?*@'~") path = path[:e.start] + repercent.encode() + path[e.end:] else: return path def filepath_to_uri(path): """Convert a file system path to a URI portion that is suitable for inclusion in a URL. Encode certain chars that would normally be recognized as special chars for URIs. Do not encode the ' character, as it is a valid character within URIs. See the encodeURIComponent() JavaScript function for details. """ if path is None: return path # I know about `os.sep` and `os.altsep` but I want to leave # some flexibility for hardcoding separators. return quote(str(path).replace("\\", "/"), safe="/~!*()'") def get_system_encoding(): """ The encoding of the default system locale. Fallback to 'ascii' if the #encoding is unsupported by Python or could not be determined. See tickets #10335 and #5846. """ try: encoding = locale.getdefaultlocale()[1] or 'ascii' codecs.lookup(encoding) except Exception: encoding = 'ascii' return encoding DEFAULT_LOCALE_ENCODING = get_system_encoding()
438e1080f753343da656b65145fe2c902a12c4b0752d5f8e1d152c0d1a68ddd6
""" Django's standard crypto functions and utilities. """ import hashlib import hmac import secrets import warnings from django.conf import settings from django.utils.deprecation import RemovedInDjango40Warning from django.utils.encoding import force_bytes class InvalidAlgorithm(ValueError): """Algorithm is not supported by hashlib.""" pass def salted_hmac(key_salt, value, secret=None, *, algorithm='sha1'): """ Return the HMAC of 'value', using a key generated from key_salt and a secret (which defaults to settings.SECRET_KEY). Default algorithm is SHA1, but any algorithm name supported by hashlib.new() can be passed. A different key_salt should be passed in for every application of HMAC. """ if secret is None: secret = settings.SECRET_KEY key_salt = force_bytes(key_salt) secret = force_bytes(secret) try: hasher = getattr(hashlib, algorithm) except AttributeError as e: raise InvalidAlgorithm( '%r is not an algorithm accepted by the hashlib module.' % algorithm ) from e # We need to generate a derived key from our base key. We can do this by # passing the key_salt and our base key through a pseudo-random function. key = hasher(key_salt + secret).digest() # If len(key_salt + secret) > block size of the hash algorithm, the above # line is redundant and could be replaced by key = key_salt + secret, since # the hmac module does the same thing for keys longer than the block size. # However, we need to ensure that we *always* do this. return hmac.new(key, msg=force_bytes(value), digestmod=hasher) NOT_PROVIDED = object() # RemovedInDjango40Warning. # RemovedInDjango40Warning: when the deprecation ends, replace with: # def get_random_string(length, allowed_chars='...'): def get_random_string(length=NOT_PROVIDED, allowed_chars=( 'abcdefghijklmnopqrstuvwxyz' 'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789' )): """ Return a securely generated random string. The bit length of the returned value can be calculated with the formula: log_2(len(allowed_chars)^length) For example, with default `allowed_chars` (26+26+10), this gives: * length: 12, bit length =~ 71 bits * length: 22, bit length =~ 131 bits """ if length is NOT_PROVIDED: warnings.warn( 'Not providing a length argument is deprecated.', RemovedInDjango40Warning, ) length = 12 return ''.join(secrets.choice(allowed_chars) for i in range(length)) def constant_time_compare(val1, val2): """Return True if the two strings are equal, False otherwise.""" return secrets.compare_digest(force_bytes(val1), force_bytes(val2)) def pbkdf2(password, salt, iterations, dklen=0, digest=None): """Return the hash of password using pbkdf2.""" if digest is None: digest = hashlib.sha256 dklen = dklen or None password = force_bytes(password) salt = force_bytes(salt) return hashlib.pbkdf2_hmac(digest().name, password, salt, iterations, dklen)
10f2f536ce0d5ffa14cd867ec9660e7063fa4ad537f9e041547584e946ae6e08
import logging import logging.config # needed when logging_config doesn't start with logging.config from copy import copy from django.conf import settings from django.core import mail from django.core.mail import get_connection from django.core.management.color import color_style from django.utils.module_loading import import_string request_logger = logging.getLogger('django.request') # Default logging for Django. This sends an email to the site admins on every # HTTP 500 error. Depending on DEBUG, all other log records are either sent to # the console (DEBUG=True) or discarded (DEBUG=False) by means of the # require_debug_true filter. DEFAULT_LOGGING = { 'version': 1, 'disable_existing_loggers': False, 'filters': { 'require_debug_false': { '()': 'django.utils.log.RequireDebugFalse', }, 'require_debug_true': { '()': 'django.utils.log.RequireDebugTrue', }, }, 'formatters': { 'django.server': { '()': 'django.utils.log.ServerFormatter', 'format': '[{server_time}] {message}', 'style': '{', } }, 'handlers': { 'console': { 'level': 'INFO', 'filters': ['require_debug_true'], 'class': 'logging.StreamHandler', }, 'django.server': { 'level': 'INFO', 'class': 'logging.StreamHandler', 'formatter': 'django.server', }, 'mail_admins': { 'level': 'ERROR', 'filters': ['require_debug_false'], 'class': 'django.utils.log.AdminEmailHandler' } }, 'loggers': { 'django': { 'handlers': ['console', 'mail_admins'], 'level': 'INFO', }, 'django.server': { 'handlers': ['django.server'], 'level': 'INFO', 'propagate': False, }, } } def configure_logging(logging_config, logging_settings): if logging_config: # First find the logging configuration function ... logging_config_func = import_string(logging_config) logging.config.dictConfig(DEFAULT_LOGGING) # ... then invoke it with the logging settings if logging_settings: logging_config_func(logging_settings) class AdminEmailHandler(logging.Handler): """An exception log handler that emails log entries to site admins. If the request is passed as the first argument to the log record, request data will be provided in the email report. """ def __init__(self, include_html=False, email_backend=None, reporter_class=None): super().__init__() self.include_html = include_html self.email_backend = email_backend self.reporter_class = import_string(reporter_class or settings.DEFAULT_EXCEPTION_REPORTER) def emit(self, record): try: request = record.request subject = '%s (%s IP): %s' % ( record.levelname, ('internal' if request.META.get('REMOTE_ADDR') in settings.INTERNAL_IPS else 'EXTERNAL'), record.getMessage() ) except Exception: subject = '%s: %s' % ( record.levelname, record.getMessage() ) request = None subject = self.format_subject(subject) # Since we add a nicely formatted traceback on our own, create a copy # of the log record without the exception data. 
no_exc_record = copy(record) no_exc_record.exc_info = None no_exc_record.exc_text = None if record.exc_info: exc_info = record.exc_info else: exc_info = (None, record.getMessage(), None) reporter = self.reporter_class(request, is_email=True, *exc_info) message = "%s\n\n%s" % (self.format(no_exc_record), reporter.get_traceback_text()) html_message = reporter.get_traceback_html() if self.include_html else None self.send_mail(subject, message, fail_silently=True, html_message=html_message) def send_mail(self, subject, message, *args, **kwargs): mail.mail_admins(subject, message, *args, connection=self.connection(), **kwargs) def connection(self): return get_connection(backend=self.email_backend, fail_silently=True) def format_subject(self, subject): """ Escape CR and LF characters. """ return subject.replace('\n', '\\n').replace('\r', '\\r') class CallbackFilter(logging.Filter): """ A logging filter that checks the return value of a given callable (which takes the record-to-be-logged as its only parameter) to decide whether to log a record. """ def __init__(self, callback): self.callback = callback def filter(self, record): if self.callback(record): return 1 return 0 class RequireDebugFalse(logging.Filter): def filter(self, record): return not settings.DEBUG class RequireDebugTrue(logging.Filter): def filter(self, record): return settings.DEBUG class ServerFormatter(logging.Formatter): default_time_format = '%d/%b/%Y %H:%M:%S' def __init__(self, *args, **kwargs): self.style = color_style() super().__init__(*args, **kwargs) def format(self, record): msg = record.msg status_code = getattr(record, 'status_code', None) if status_code: if 200 <= status_code < 300: # Put 2XX first, since it should be the common case msg = self.style.HTTP_SUCCESS(msg) elif 100 <= status_code < 200: msg = self.style.HTTP_INFO(msg) elif status_code == 304: msg = self.style.HTTP_NOT_MODIFIED(msg) elif 300 <= status_code < 400: msg = self.style.HTTP_REDIRECT(msg) elif status_code == 404: msg = self.style.HTTP_NOT_FOUND(msg) elif 400 <= status_code < 500: msg = self.style.HTTP_BAD_REQUEST(msg) else: # Any 5XX, or any other status code msg = self.style.HTTP_SERVER_ERROR(msg) if self.uses_server_time() and not hasattr(record, 'server_time'): record.server_time = self.formatTime(record, self.datefmt) record.msg = msg return super().format(record) def uses_server_time(self): return self._fmt.find('{server_time}') >= 0 def log_response(message, *args, response=None, request=None, logger=request_logger, level=None, exc_info=None): """ Log errors based on HttpResponse status. Log 5xx responses as errors and 4xx responses as warnings (unless a level is given as a keyword argument). The HttpResponse status_code and the request are passed to the logger's extra parameter. """ # Check if the response has already been logged. Multiple requests to log # the same response can be received in some cases, e.g., when the # response is the result of an exception and is logged at the time the # exception is caught so that the exc_info can be recorded. if getattr(response, '_has_been_logged', False): return if level is None: if response.status_code >= 500: level = 'error' elif response.status_code >= 400: level = 'warning' else: level = 'info' getattr(logger, level)( message, *args, extra={ 'status_code': response.status_code, 'request': request, }, exc_info=exc_info, ) response._has_been_logged = True
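# Illustrative sketch (not part of Django): how a request handler might call
# log_response(). The response only needs a status_code attribute here, so a
# hypothetical stand-in class is used; in Django it's an HttpResponse.
#
#     class _FakeResponse:
#         status_code = 503
#
#     response = _FakeResponse()
#     log_response('Service unavailable: %s', '/healthz/', response=response)
#     # Logged at 'error' level (status >= 500) on 'django.request', with
#     # status_code and request in the record's extra. A second call with the
#     # same response object is a no-op thanks to _has_been_logged.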
15d4fb16e84b406c9a91081af59565ad42dad28182a415f554fc2a8dc0dcdf97
import copy import itertools import operator from functools import total_ordering, wraps class cached_property: """ Decorator that converts a method with a single self argument into a property cached on the instance. A cached property can be made out of an existing method: (e.g. ``url = cached_property(get_absolute_url)``). The optional ``name`` argument is obsolete as of Python 3.6 and will be deprecated in Django 4.0 (#30127). """ name = None @staticmethod def func(instance): raise TypeError( 'Cannot use cached_property instance without calling ' '__set_name__() on it.' ) def __init__(self, func, name=None): self.real_func = func self.__doc__ = getattr(func, '__doc__') def __set_name__(self, owner, name): if self.name is None: self.name = name self.func = self.real_func elif name != self.name: raise TypeError( "Cannot assign the same cached_property to two different names " "(%r and %r)." % (self.name, name) ) def __get__(self, instance, cls=None): """ Call the function and put the return value in instance.__dict__ so that subsequent attribute access on the instance returns the cached value instead of calling cached_property.__get__(). """ if instance is None: return self res = instance.__dict__[self.name] = self.func(instance) return res class classproperty: """ Decorator that converts a method with a single cls argument into a property that can be accessed directly from the class. """ def __init__(self, method=None): self.fget = method def __get__(self, instance, cls=None): return self.fget(cls) def getter(self, method): self.fget = method return self class Promise: """ Base class for the proxy class created in the closure of the lazy function. It's used to recognize promises in code. """ pass def lazy(func, *resultclasses): """ Turn any callable into a lazy evaluated callable. result classes or types is required -- at least one is needed so that the automatic forcing of the lazy evaluation code is triggered. Results are not memoized; the function is evaluated on every access. """ @total_ordering class __proxy__(Promise): """ Encapsulate a function call and act as a proxy for methods that are called on the result of that function. The function is not evaluated until one of the methods on the result is called. """ __prepared = False def __init__(self, args, kw): self.__args = args self.__kw = kw if not self.__prepared: self.__prepare_class__() self.__class__.__prepared = True def __reduce__(self): return ( _lazy_proxy_unpickle, (func, self.__args, self.__kw) + resultclasses ) def __repr__(self): return repr(self.__cast()) @classmethod def __prepare_class__(cls): for resultclass in resultclasses: for type_ in resultclass.mro(): for method_name in type_.__dict__: # All __promise__ return the same wrapper method, they # look up the correct implementation when called. if hasattr(cls, method_name): continue meth = cls.__promise__(method_name) setattr(cls, method_name, meth) cls._delegate_bytes = bytes in resultclasses cls._delegate_text = str in resultclasses assert not (cls._delegate_bytes and cls._delegate_text), ( "Cannot call lazy() with both bytes and text return types.") if cls._delegate_text: cls.__str__ = cls.__text_cast elif cls._delegate_bytes: cls.__bytes__ = cls.__bytes_cast @classmethod def __promise__(cls, method_name): # Builds a wrapper around some magic method def __wrapper__(self, *args, **kw): # Automatically triggers the evaluation of a lazy value and # applies the given magic method of the result type. 
res = func(*self.__args, **self.__kw) return getattr(res, method_name)(*args, **kw) return __wrapper__ def __text_cast(self): return func(*self.__args, **self.__kw) def __bytes_cast(self): return bytes(func(*self.__args, **self.__kw)) def __bytes_cast_encoded(self): return func(*self.__args, **self.__kw).encode() def __cast(self): if self._delegate_bytes: return self.__bytes_cast() elif self._delegate_text: return self.__text_cast() else: return func(*self.__args, **self.__kw) def __str__(self): # object defines __str__(), so __prepare_class__() won't overload # a __str__() method from the proxied class. return str(self.__cast()) def __eq__(self, other): if isinstance(other, Promise): other = other.__cast() return self.__cast() == other def __lt__(self, other): if isinstance(other, Promise): other = other.__cast() return self.__cast() < other def __hash__(self): return hash(self.__cast()) def __mod__(self, rhs): if self._delegate_text: return str(self) % rhs return self.__cast() % rhs def __deepcopy__(self, memo): # Instances of this class are effectively immutable. It's just a # collection of functions. So we don't need to do anything # complicated for copying. memo[id(self)] = self return self @wraps(func) def __wrapper__(*args, **kw): # Creates the proxy object, instead of the actual value. return __proxy__(args, kw) return __wrapper__ def _lazy_proxy_unpickle(func, args, kwargs, *resultclasses): return lazy(func, *resultclasses)(*args, **kwargs) def lazystr(text): """ Shortcut for the common case of a lazy callable that returns str. """ return lazy(str, str)(text) def keep_lazy(*resultclasses): """ A decorator that allows a function to be called with one or more lazy arguments. If none of the args are lazy, the function is evaluated immediately, otherwise a __proxy__ is returned that will evaluate the function when needed. """ if not resultclasses: raise TypeError("You must pass at least one argument to keep_lazy().") def decorator(func): lazy_func = lazy(func, *resultclasses) @wraps(func) def wrapper(*args, **kwargs): if any(isinstance(arg, Promise) for arg in itertools.chain(args, kwargs.values())): return lazy_func(*args, **kwargs) return func(*args, **kwargs) return wrapper return decorator def keep_lazy_text(func): """ A decorator for functions that accept lazy arguments and return text. """ return keep_lazy(str)(func) empty = object() def new_method_proxy(func): def inner(self, *args): if self._wrapped is empty: self._setup() return func(self._wrapped, *args) return inner class LazyObject: """ A wrapper for another class that can be used to delay instantiation of the wrapped class. By subclassing, you have the opportunity to intercept and alter the instantiation. If you don't need to do that, use SimpleLazyObject. """ # Avoid infinite recursion when tracing __init__ (#19456). _wrapped = None def __init__(self): # Note: if a subclass overrides __init__(), it will likely need to # override __copy__() and __deepcopy__() as well. self._wrapped = empty __getattr__ = new_method_proxy(getattr) def __setattr__(self, name, value): if name == "_wrapped": # Assign to __dict__ to avoid infinite __setattr__ loops. self.__dict__["_wrapped"] = value else: if self._wrapped is empty: self._setup() setattr(self._wrapped, name, value) def __delattr__(self, name): if name == "_wrapped": raise TypeError("can't delete _wrapped.") if self._wrapped is empty: self._setup() delattr(self._wrapped, name) def _setup(self): """ Must be implemented by subclasses to initialize the wrapped object. 
""" raise NotImplementedError('subclasses of LazyObject must provide a _setup() method') # Because we have messed with __class__ below, we confuse pickle as to what # class we are pickling. We're going to have to initialize the wrapped # object to successfully pickle it, so we might as well just pickle the # wrapped object since they're supposed to act the same way. # # Unfortunately, if we try to simply act like the wrapped object, the ruse # will break down when pickle gets our id(). Thus we end up with pickle # thinking, in effect, that we are a distinct object from the wrapped # object, but with the same __dict__. This can cause problems (see #25389). # # So instead, we define our own __reduce__ method and custom unpickler. We # pickle the wrapped object as the unpickler's argument, so that pickle # will pickle it normally, and then the unpickler simply returns its # argument. def __reduce__(self): if self._wrapped is empty: self._setup() return (unpickle_lazyobject, (self._wrapped,)) def __copy__(self): if self._wrapped is empty: # If uninitialized, copy the wrapper. Use type(self), not # self.__class__, because the latter is proxied. return type(self)() else: # If initialized, return a copy of the wrapped object. return copy.copy(self._wrapped) def __deepcopy__(self, memo): if self._wrapped is empty: # We have to use type(self), not self.__class__, because the # latter is proxied. result = type(self)() memo[id(self)] = result return result return copy.deepcopy(self._wrapped, memo) __bytes__ = new_method_proxy(bytes) __str__ = new_method_proxy(str) __bool__ = new_method_proxy(bool) # Introspection support __dir__ = new_method_proxy(dir) # Need to pretend to be the wrapped class, for the sake of objects that # care about this (especially in equality tests) __class__ = property(new_method_proxy(operator.attrgetter("__class__"))) __eq__ = new_method_proxy(operator.eq) __lt__ = new_method_proxy(operator.lt) __gt__ = new_method_proxy(operator.gt) __ne__ = new_method_proxy(operator.ne) __hash__ = new_method_proxy(hash) # List/Tuple/Dictionary methods support __getitem__ = new_method_proxy(operator.getitem) __setitem__ = new_method_proxy(operator.setitem) __delitem__ = new_method_proxy(operator.delitem) __iter__ = new_method_proxy(iter) __len__ = new_method_proxy(len) __contains__ = new_method_proxy(operator.contains) def unpickle_lazyobject(wrapped): """ Used to unpickle lazy objects. Just return its argument, which will be the wrapped object. """ return wrapped class SimpleLazyObject(LazyObject): """ A lazy object initialized from any function. Designed for compound objects of unknown type. For builtins or objects of known type, use django.utils.functional.lazy. """ def __init__(self, func): """ Pass in a callable that returns the object to be wrapped. If copies are made of the resulting SimpleLazyObject, which can happen in various circumstances within Django, then you must ensure that the callable can be safely run more than once and will return the same value. """ self.__dict__['_setupfunc'] = func super().__init__() def _setup(self): self._wrapped = self._setupfunc() # Return a meaningful representation of the lazy object for debugging # without evaluating the wrapped object. def __repr__(self): if self._wrapped is empty: repr_attr = self._setupfunc else: repr_attr = self._wrapped return '<%s: %r>' % (type(self).__name__, repr_attr) def __copy__(self): if self._wrapped is empty: # If uninitialized, copy the wrapper. 
            # Use SimpleLazyObject, not
            # self.__class__, because the latter is proxied.
            return SimpleLazyObject(self._setupfunc)
        else:
            # If initialized, return a copy of the wrapped object.
            return copy.copy(self._wrapped)

    def __deepcopy__(self, memo):
        if self._wrapped is empty:
            # We have to use SimpleLazyObject, not self.__class__, because the
            # latter is proxied.
            result = SimpleLazyObject(self._setupfunc)
            memo[id(self)] = result
            return result
        return copy.deepcopy(self._wrapped, memo)


def partition(predicate, values):
    """
    Split the values into two sets, based on the return value of the function
    (True/False). e.g.:

        >>> partition(lambda x: x > 3, range(5))
        ([0, 1, 2, 3], [4])
    """
    results = ([], [])
    for item in values:
        results[predicate(item)].append(item)
    return results
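# Illustrative examples (not part of Django), shown as doctest-style comments.
#
#     >>> greet = lazy(lambda name: 'Hello, %s' % name, str)
#     >>> promise = greet('world')   # nothing is evaluated yet
#     >>> str(promise)               # evaluated on first real use
#     'Hello, world'
#
#     >>> obj = SimpleLazyObject(lambda: {'answer': 42})
#     >>> obj['answer']              # __getitem__ triggers _setup()
#     42
#
#     >>> partition(lambda x: x % 2, [1, 2, 3, 4])
#     ([2, 4], [1, 3])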
5f4d3a4aa3e37607d06b49125838b87ee852eec427fc0ee3116553ff8a3d3d58
import posixpath from collections import defaultdict from django.utils.safestring import mark_safe from .base import ( Node, Template, TemplateSyntaxError, TextNode, Variable, token_kwargs, ) from .library import Library register = Library() BLOCK_CONTEXT_KEY = 'block_context' class BlockContext: def __init__(self): # Dictionary of FIFO queues. self.blocks = defaultdict(list) def add_blocks(self, blocks): for name, block in blocks.items(): self.blocks[name].insert(0, block) def pop(self, name): try: return self.blocks[name].pop() except IndexError: return None def push(self, name, block): self.blocks[name].append(block) def get_block(self, name): try: return self.blocks[name][-1] except IndexError: return None class BlockNode(Node): def __init__(self, name, nodelist, parent=None): self.name, self.nodelist, self.parent = name, nodelist, parent def __repr__(self): return "<Block Node: %s. Contents: %r>" % (self.name, self.nodelist) def render(self, context): block_context = context.render_context.get(BLOCK_CONTEXT_KEY) with context.push(): if block_context is None: context['block'] = self result = self.nodelist.render(context) else: push = block = block_context.pop(self.name) if block is None: block = self # Create new block so we can store context without thread-safety issues. block = type(self)(block.name, block.nodelist) block.context = context context['block'] = block result = block.nodelist.render(context) if push is not None: block_context.push(self.name, push) return result def super(self): if not hasattr(self, 'context'): raise TemplateSyntaxError( "'%s' object has no attribute 'context'. Did you use " "{{ block.super }} in a base template?" % self.__class__.__name__ ) render_context = self.context.render_context if (BLOCK_CONTEXT_KEY in render_context and render_context[BLOCK_CONTEXT_KEY].get_block(self.name) is not None): return mark_safe(self.render(self.context)) return '' class ExtendsNode(Node): must_be_first = True context_key = 'extends_context' def __init__(self, nodelist, parent_name, template_dirs=None): self.nodelist = nodelist self.parent_name = parent_name self.template_dirs = template_dirs self.blocks = {n.name: n for n in nodelist.get_nodes_by_type(BlockNode)} def __repr__(self): return '<%s: extends %s>' % (self.__class__.__name__, self.parent_name.token) def find_template(self, template_name, context): """ This is a wrapper around engine.find_template(). A history is kept in the render_context attribute between successive extends calls and passed as the skip argument. This enables extends to work recursively without extending the same template twice. """ history = context.render_context.setdefault( self.context_key, [self.origin], ) template, origin = context.template.engine.find_template( template_name, skip=history, ) history.append(origin) return template def get_parent(self, context): parent = self.parent_name.resolve(context) if not parent: error_msg = "Invalid template name in 'extends' tag: %r." % parent if self.parent_name.filters or\ isinstance(self.parent_name.var, Variable): error_msg += " Got this from the '%s' variable." 
%\ self.parent_name.token raise TemplateSyntaxError(error_msg) if isinstance(parent, Template): # parent is a django.template.Template return parent if isinstance(getattr(parent, 'template', None), Template): # parent is a django.template.backends.django.Template return parent.template return self.find_template(parent, context) def render(self, context): compiled_parent = self.get_parent(context) if BLOCK_CONTEXT_KEY not in context.render_context: context.render_context[BLOCK_CONTEXT_KEY] = BlockContext() block_context = context.render_context[BLOCK_CONTEXT_KEY] # Add the block nodes from this node to the block context block_context.add_blocks(self.blocks) # If this block's parent doesn't have an extends node it is the root, # and its block nodes also need to be added to the block context. for node in compiled_parent.nodelist: # The ExtendsNode has to be the first non-text node. if not isinstance(node, TextNode): if not isinstance(node, ExtendsNode): blocks = {n.name: n for n in compiled_parent.nodelist.get_nodes_by_type(BlockNode)} block_context.add_blocks(blocks) break # Call Template._render explicitly so the parser context stays # the same. with context.render_context.push_state(compiled_parent, isolated_context=False): return compiled_parent._render(context) class IncludeNode(Node): context_key = '__include_context' def __init__(self, template, *args, extra_context=None, isolated_context=False, **kwargs): self.template = template self.extra_context = extra_context or {} self.isolated_context = isolated_context super().__init__(*args, **kwargs) def render(self, context): """ Render the specified template and context. Cache the template object in render_context to avoid reparsing and loading when used in a for loop. """ template = self.template.resolve(context) # Does this quack like a Template? if not callable(getattr(template, 'render', None)): # If not, try the cache and select_template(). template_name = template or () if isinstance(template_name, str): template_name = (template_name,) else: template_name = tuple(template_name) cache = context.render_context.dicts[0].setdefault(self, {}) template = cache.get(template_name) if template is None: template = context.template.engine.select_template(template_name) cache[template_name] = template # Use the base.Template of a backends.django.Template. elif hasattr(template, 'template'): template = template.template values = { name: var.resolve(context) for name, var in self.extra_context.items() } if self.isolated_context: return template.render(context.new(values)) with context.push(**values): return template.render(context) @register.tag('block') def do_block(parser, token): """ Define a block that can be overridden by child templates. """ # token.split_contents() isn't useful here because this tag doesn't accept variable as arguments bits = token.contents.split() if len(bits) != 2: raise TemplateSyntaxError("'%s' tag takes only one argument" % bits[0]) block_name = bits[1] # Keep track of the names of BlockNodes found in this template, so we can # check for duplication. try: if block_name in parser.__loaded_blocks: raise TemplateSyntaxError("'%s' tag with name '%s' appears more than once" % (bits[0], block_name)) parser.__loaded_blocks.append(block_name) except AttributeError: # parser.__loaded_blocks isn't a list yet parser.__loaded_blocks = [block_name] nodelist = parser.parse(('endblock',)) # This check is kept for backwards-compatibility. See #3100. 
endblock = parser.next_token() acceptable_endblocks = ('endblock', 'endblock %s' % block_name) if endblock.contents not in acceptable_endblocks: parser.invalid_block_tag(endblock, 'endblock', acceptable_endblocks) return BlockNode(block_name, nodelist) def construct_relative_path(current_template_name, relative_name): """ Convert a relative path (starting with './' or '../') to the full template name based on the current_template_name. """ if not relative_name.startswith(("'./", "'../", '"./', '"../')): # relative_name is a variable or a literal that doesn't contain a # relative path. return relative_name new_name = posixpath.normpath( posixpath.join( posixpath.dirname(current_template_name.lstrip('/')), relative_name.strip('\'"') ) ) if new_name.startswith('../'): raise TemplateSyntaxError( "The relative path '%s' points outside the file hierarchy that " "template '%s' is in." % (relative_name, current_template_name) ) if current_template_name.lstrip('/') == new_name: raise TemplateSyntaxError( "The relative path '%s' was translated to template name '%s', the " "same template in which the tag appears." % (relative_name, current_template_name) ) return '"%s"' % new_name @register.tag('extends') def do_extends(parser, token): """ Signal that this template extends a parent template. This tag may be used in two ways: ``{% extends "base" %}`` (with quotes) uses the literal value "base" as the name of the parent template to extend, or ``{% extends variable %}`` uses the value of ``variable`` as either the name of the parent template to extend (if it evaluates to a string) or as the parent template itself (if it evaluates to a Template object). """ bits = token.split_contents() if len(bits) != 2: raise TemplateSyntaxError("'%s' takes one argument" % bits[0]) bits[1] = construct_relative_path(parser.origin.template_name, bits[1]) parent_name = parser.compile_filter(bits[1]) nodelist = parser.parse() if nodelist.get_nodes_by_type(ExtendsNode): raise TemplateSyntaxError("'%s' cannot appear more than once in the same template" % bits[0]) return ExtendsNode(nodelist, parent_name) @register.tag('include') def do_include(parser, token): """ Load a template and render it with the current context. You can pass additional context using keyword arguments. Example:: {% include "foo/some_include" %} {% include "foo/some_include" with bar="BAZZ!" baz="BING!" %} Use the ``only`` argument to exclude the current context when rendering the included template:: {% include "foo/some_include" only %} {% include "foo/some_include" with bar="1" only %} """ bits = token.split_contents() if len(bits) < 2: raise TemplateSyntaxError( "%r tag takes at least one argument: the name of the template to " "be included." % bits[0] ) options = {} remaining_bits = bits[2:] while remaining_bits: option = remaining_bits.pop(0) if option in options: raise TemplateSyntaxError('The %r option was specified more ' 'than once.' % option) if option == 'with': value = token_kwargs(remaining_bits, parser, support_legacy=False) if not value: raise TemplateSyntaxError('"with" in %r tag needs at least ' 'one keyword argument.' % bits[0]) elif option == 'only': value = True else: raise TemplateSyntaxError('Unknown argument for %r tag: %r.' % (bits[0], option)) options[option] = value isolated_context = options.get('only', False) namemap = options.get('with', {}) bits[1] = construct_relative_path(parser.origin.template_name, bits[1]) return IncludeNode(parser.compile_filter(bits[1]), extra_context=namemap, isolated_context=isolated_context)
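

# A minimal, illustrative sketch (not used by Django itself; the helper name
# and template contents are hypothetical) of the {% include %} machinery
# above, run against an in-memory loader so it is self-contained:
def _include_only_example():
    from django.template import Context, Engine
    engine = Engine(loaders=[
        ('django.template.loaders.locmem.Loader', {
            'inc.html': '{{ greeting }}{{ secret }}',
        }),
    ])
    template = engine.from_string(
        '{% include "inc.html" with greeting="hello" only %}'
    )
    # The "only" flag makes IncludeNode render inc.html in an isolated
    # context, so 'secret' is invisible and the result is just "hello".
    return template.render(Context({'secret': 'hidden'}))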
"""Default tags used by the template system, available to all templates.""" import re import sys import warnings from collections import namedtuple from datetime import datetime from itertools import cycle as itertools_cycle, groupby from django.conf import settings from django.utils import timezone from django.utils.html import conditional_escape, format_html from django.utils.lorem_ipsum import paragraphs, words from django.utils.safestring import mark_safe from .base import ( BLOCK_TAG_END, BLOCK_TAG_START, COMMENT_TAG_END, COMMENT_TAG_START, FILTER_SEPARATOR, SINGLE_BRACE_END, SINGLE_BRACE_START, VARIABLE_ATTRIBUTE_SEPARATOR, VARIABLE_TAG_END, VARIABLE_TAG_START, Node, NodeList, TemplateSyntaxError, VariableDoesNotExist, kwarg_re, render_value_in_context, token_kwargs, ) from .context import Context from .defaultfilters import date from .library import Library from .smartif import IfParser, Literal register = Library() class AutoEscapeControlNode(Node): """Implement the actions of the autoescape tag.""" def __init__(self, setting, nodelist): self.setting, self.nodelist = setting, nodelist def render(self, context): old_setting = context.autoescape context.autoescape = self.setting output = self.nodelist.render(context) context.autoescape = old_setting if self.setting: return mark_safe(output) else: return output class CommentNode(Node): def render(self, context): return '' class CsrfTokenNode(Node): def render(self, context): csrf_token = context.get('csrf_token') if csrf_token: if csrf_token == 'NOTPROVIDED': return format_html("") else: return format_html('<input type="hidden" name="csrfmiddlewaretoken" value="{}">', csrf_token) else: # It's very probable that the token is missing because of # misconfiguration, so we raise a warning if settings.DEBUG: warnings.warn( "A {% csrf_token %} was used in a template, but the context " "did not provide the value. This is usually caused by not " "using RequestContext." ) return '' class CycleNode(Node): def __init__(self, cyclevars, variable_name=None, silent=False): self.cyclevars = cyclevars self.variable_name = variable_name self.silent = silent def render(self, context): if self not in context.render_context: # First time the node is rendered in template context.render_context[self] = itertools_cycle(self.cyclevars) cycle_iter = context.render_context[self] value = next(cycle_iter).resolve(context) if self.variable_name: context.set_upward(self.variable_name, value) if self.silent: return '' return render_value_in_context(value, context) def reset(self, context): """ Reset the cycle iteration back to the beginning. """ context.render_context[self] = itertools_cycle(self.cyclevars) class DebugNode(Node): def render(self, context): from pprint import pformat output = [pformat(val) for val in context] output.append('\n\n') output.append(pformat(sys.modules)) return ''.join(output) class FilterNode(Node): def __init__(self, filter_expr, nodelist): self.filter_expr, self.nodelist = filter_expr, nodelist def render(self, context): output = self.nodelist.render(context) # Apply filters. 
with context.push(var=output): return self.filter_expr.resolve(context) class FirstOfNode(Node): def __init__(self, variables, asvar=None): self.vars = variables self.asvar = asvar def render(self, context): first = '' for var in self.vars: value = var.resolve(context, ignore_failures=True) if value: first = render_value_in_context(value, context) break if self.asvar: context[self.asvar] = first return '' return first class ForNode(Node): child_nodelists = ('nodelist_loop', 'nodelist_empty') def __init__(self, loopvars, sequence, is_reversed, nodelist_loop, nodelist_empty=None): self.loopvars, self.sequence = loopvars, sequence self.is_reversed = is_reversed self.nodelist_loop = nodelist_loop if nodelist_empty is None: self.nodelist_empty = NodeList() else: self.nodelist_empty = nodelist_empty def __repr__(self): reversed_text = ' reversed' if self.is_reversed else '' return '<%s: for %s in %s, tail_len: %d%s>' % ( self.__class__.__name__, ', '.join(self.loopvars), self.sequence, len(self.nodelist_loop), reversed_text, ) def render(self, context): if 'forloop' in context: parentloop = context['forloop'] else: parentloop = {} with context.push(): values = self.sequence.resolve(context, ignore_failures=True) if values is None: values = [] if not hasattr(values, '__len__'): values = list(values) len_values = len(values) if len_values < 1: return self.nodelist_empty.render(context) nodelist = [] if self.is_reversed: values = reversed(values) num_loopvars = len(self.loopvars) unpack = num_loopvars > 1 # Create a forloop value in the context. We'll update counters on each # iteration just below. loop_dict = context['forloop'] = {'parentloop': parentloop} for i, item in enumerate(values): # Shortcuts for current loop iteration number. loop_dict['counter0'] = i loop_dict['counter'] = i + 1 # Reverse counter iteration numbers. loop_dict['revcounter'] = len_values - i loop_dict['revcounter0'] = len_values - i - 1 # Boolean values designating first and last times through loop. loop_dict['first'] = (i == 0) loop_dict['last'] = (i == len_values - 1) pop_context = False if unpack: # If there are multiple loop variables, unpack the item into # them. try: len_item = len(item) except TypeError: # not an iterable len_item = 1 # Check loop variable count before unpacking if num_loopvars != len_item: raise ValueError( "Need {} values to unpack in for loop; got {}. " .format(num_loopvars, len_item), ) unpacked_vars = dict(zip(self.loopvars, item)) pop_context = True context.update(unpacked_vars) else: context[self.loopvars[0]] = item for node in self.nodelist_loop: nodelist.append(node.render_annotated(context)) if pop_context: # Pop the loop variables pushed on to the context to avoid # the context ending up in an inconsistent state when other # tags (e.g., include and with) push data to context. context.pop() return mark_safe(''.join(nodelist)) class IfChangedNode(Node): child_nodelists = ('nodelist_true', 'nodelist_false') def __init__(self, nodelist_true, nodelist_false, *varlist): self.nodelist_true, self.nodelist_false = nodelist_true, nodelist_false self._varlist = varlist def render(self, context): # Init state storage state_frame = self._get_context_stack_frame(context) state_frame.setdefault(self) nodelist_true_output = None if self._varlist: # Consider multiple parameters. This behaves like an OR evaluation # of the multiple variables. 
compare_to = [var.resolve(context, ignore_failures=True) for var in self._varlist] else: # The "{% ifchanged %}" syntax (without any variables) compares # the rendered output. compare_to = nodelist_true_output = self.nodelist_true.render(context) if compare_to != state_frame[self]: state_frame[self] = compare_to # render true block if not already rendered return nodelist_true_output or self.nodelist_true.render(context) elif self.nodelist_false: return self.nodelist_false.render(context) return '' def _get_context_stack_frame(self, context): # The Context object behaves like a stack where each template tag can create a new scope. # Find the place where to store the state to detect changes. if 'forloop' in context: # Ifchanged is bound to the local for loop. # When there is a loop-in-loop, the state is bound to the inner loop, # so it resets when the outer loop continues. return context['forloop'] else: # Using ifchanged outside loops. Effectively this is a no-op because the state is associated with 'self'. return context.render_context class IfEqualNode(Node): child_nodelists = ('nodelist_true', 'nodelist_false') def __init__(self, var1, var2, nodelist_true, nodelist_false, negate): self.var1, self.var2 = var1, var2 self.nodelist_true, self.nodelist_false = nodelist_true, nodelist_false self.negate = negate def __repr__(self): return '<%s>' % self.__class__.__name__ def render(self, context): val1 = self.var1.resolve(context, ignore_failures=True) val2 = self.var2.resolve(context, ignore_failures=True) if (self.negate and val1 != val2) or (not self.negate and val1 == val2): return self.nodelist_true.render(context) return self.nodelist_false.render(context) class IfNode(Node): def __init__(self, conditions_nodelists): self.conditions_nodelists = conditions_nodelists def __repr__(self): return '<%s>' % self.__class__.__name__ def __iter__(self): for _, nodelist in self.conditions_nodelists: yield from nodelist @property def nodelist(self): return NodeList(self) def render(self, context): for condition, nodelist in self.conditions_nodelists: if condition is not None: # if / elif clause try: match = condition.eval(context) except VariableDoesNotExist: match = None else: # else clause match = True if match: return nodelist.render(context) return '' class LoremNode(Node): def __init__(self, count, method, common): self.count, self.method, self.common = count, method, common def render(self, context): try: count = int(self.count.resolve(context)) except (ValueError, TypeError): count = 1 if self.method == 'w': return words(count, common=self.common) else: paras = paragraphs(count, common=self.common) if self.method == 'p': paras = ['<p>%s</p>' % p for p in paras] return '\n\n'.join(paras) GroupedResult = namedtuple('GroupedResult', ['grouper', 'list']) class RegroupNode(Node): def __init__(self, target, expression, var_name): self.target, self.expression = target, expression self.var_name = var_name def resolve_expression(self, obj, context): # This method is called for each object in self.target. See regroup() # for the reason why we temporarily put the object in the context. context[self.var_name] = obj return self.expression.resolve(context, ignore_failures=True) def render(self, context): obj_list = self.target.resolve(context, ignore_failures=True) if obj_list is None: # target variable wasn't found in context; fail silently. context[self.var_name] = [] return '' # List of dictionaries in the format: # {'grouper': 'key', 'list': [list of contents]}. 
context[self.var_name] = [ GroupedResult(grouper=key, list=list(val)) for key, val in groupby(obj_list, lambda obj: self.resolve_expression(obj, context)) ] return '' class LoadNode(Node): def render(self, context): return '' class NowNode(Node): def __init__(self, format_string, asvar=None): self.format_string = format_string self.asvar = asvar def render(self, context): tzinfo = timezone.get_current_timezone() if settings.USE_TZ else None formatted = date(datetime.now(tz=tzinfo), self.format_string) if self.asvar: context[self.asvar] = formatted return '' else: return formatted class ResetCycleNode(Node): def __init__(self, node): self.node = node def render(self, context): self.node.reset(context) return '' class SpacelessNode(Node): def __init__(self, nodelist): self.nodelist = nodelist def render(self, context): from django.utils.html import strip_spaces_between_tags return strip_spaces_between_tags(self.nodelist.render(context).strip()) class TemplateTagNode(Node): mapping = { 'openblock': BLOCK_TAG_START, 'closeblock': BLOCK_TAG_END, 'openvariable': VARIABLE_TAG_START, 'closevariable': VARIABLE_TAG_END, 'openbrace': SINGLE_BRACE_START, 'closebrace': SINGLE_BRACE_END, 'opencomment': COMMENT_TAG_START, 'closecomment': COMMENT_TAG_END, } def __init__(self, tagtype): self.tagtype = tagtype def render(self, context): return self.mapping.get(self.tagtype, '') class URLNode(Node): def __init__(self, view_name, args, kwargs, asvar): self.view_name = view_name self.args = args self.kwargs = kwargs self.asvar = asvar def render(self, context): from django.urls import reverse, NoReverseMatch args = [arg.resolve(context) for arg in self.args] kwargs = {k: v.resolve(context) for k, v in self.kwargs.items()} view_name = self.view_name.resolve(context) try: current_app = context.request.current_app except AttributeError: try: current_app = context.request.resolver_match.namespace except AttributeError: current_app = None # Try to look up the URL. If it fails, raise NoReverseMatch unless the # {% url ... as var %} construct is used, in which case return nothing. url = '' try: url = reverse(view_name, args=args, kwargs=kwargs, current_app=current_app) except NoReverseMatch: if self.asvar is None: raise if self.asvar: context[self.asvar] = url return '' else: if context.autoescape: url = conditional_escape(url) return url class VerbatimNode(Node): def __init__(self, content): self.content = content def render(self, context): return self.content class WidthRatioNode(Node): def __init__(self, val_expr, max_expr, max_width, asvar=None): self.val_expr = val_expr self.max_expr = max_expr self.max_width = max_width self.asvar = asvar def render(self, context): try: value = self.val_expr.resolve(context) max_value = self.max_expr.resolve(context) max_width = int(self.max_width.resolve(context)) except VariableDoesNotExist: return '' except (ValueError, TypeError): raise TemplateSyntaxError("widthratio final argument must be a number") try: value = float(value) max_value = float(max_value) ratio = (value / max_value) * max_width result = str(round(ratio)) except ZeroDivisionError: result = '0' except (ValueError, TypeError, OverflowError): result = '' if self.asvar: context[self.asvar] = result return '' else: return result class WithNode(Node): def __init__(self, var, name, nodelist, extra_context=None): self.nodelist = nodelist # var and name are legacy attributes, being left in case they are used # by third-party subclasses of this Node. 
self.extra_context = extra_context or {} if name: self.extra_context[name] = var def __repr__(self): return '<%s>' % self.__class__.__name__ def render(self, context): values = {key: val.resolve(context) for key, val in self.extra_context.items()} with context.push(**values): return self.nodelist.render(context) @register.tag def autoescape(parser, token): """ Force autoescape behavior for this block. """ # token.split_contents() isn't useful here because this tag doesn't accept variable as arguments args = token.contents.split() if len(args) != 2: raise TemplateSyntaxError("'autoescape' tag requires exactly one argument.") arg = args[1] if arg not in ('on', 'off'): raise TemplateSyntaxError("'autoescape' argument should be 'on' or 'off'") nodelist = parser.parse(('endautoescape',)) parser.delete_first_token() return AutoEscapeControlNode((arg == 'on'), nodelist) @register.tag def comment(parser, token): """ Ignore everything between ``{% comment %}`` and ``{% endcomment %}``. """ parser.skip_past('endcomment') return CommentNode() @register.tag def cycle(parser, token): """ Cycle among the given strings each time this tag is encountered. Within a loop, cycles among the given strings each time through the loop:: {% for o in some_list %} <tr class="{% cycle 'row1' 'row2' %}"> ... </tr> {% endfor %} Outside of a loop, give the values a unique name the first time you call it, then use that name each successive time through:: <tr class="{% cycle 'row1' 'row2' 'row3' as rowcolors %}">...</tr> <tr class="{% cycle rowcolors %}">...</tr> <tr class="{% cycle rowcolors %}">...</tr> You can use any number of values, separated by spaces. Commas can also be used to separate values; if a comma is used, the cycle values are interpreted as literal strings. The optional flag "silent" can be used to prevent the cycle declaration from returning any value:: {% for o in some_list %} {% cycle 'row1' 'row2' as rowcolors silent %} <tr class="{{ rowcolors }}">{% include "subtemplate.html " %}</tr> {% endfor %} """ # Note: This returns the exact same node on each {% cycle name %} call; # that is, the node object returned from {% cycle a b c as name %} and the # one returned from {% cycle name %} are the exact same object. This # shouldn't cause problems (heh), but if it does, now you know. # # Ugly hack warning: This stuffs the named template dict into parser so # that names are only unique within each template (as opposed to using # a global variable, which would make cycle names have to be unique across # *all* templates. # # It keeps the last node in the parser to be able to reset it with # {% resetcycle %}. args = token.split_contents() if len(args) < 2: raise TemplateSyntaxError("'cycle' tag requires at least two arguments") if len(args) == 2: # {% cycle foo %} case. name = args[1] if not hasattr(parser, '_named_cycle_nodes'): raise TemplateSyntaxError("No named cycles in template. '%s' is not defined" % name) if name not in parser._named_cycle_nodes: raise TemplateSyntaxError("Named cycle '%s' does not exist" % name) return parser._named_cycle_nodes[name] as_form = False if len(args) > 4: # {% cycle ... as foo [silent] %} case. if args[-3] == "as": if args[-1] != "silent": raise TemplateSyntaxError("Only 'silent' flag is allowed after cycle's name, not '%s'." 
% args[-1]) as_form = True silent = True args = args[:-1] elif args[-2] == "as": as_form = True silent = False if as_form: name = args[-1] values = [parser.compile_filter(arg) for arg in args[1:-2]] node = CycleNode(values, name, silent=silent) if not hasattr(parser, '_named_cycle_nodes'): parser._named_cycle_nodes = {} parser._named_cycle_nodes[name] = node else: values = [parser.compile_filter(arg) for arg in args[1:]] node = CycleNode(values) parser._last_cycle_node = node return node @register.tag def csrf_token(parser, token): return CsrfTokenNode() @register.tag def debug(parser, token): """ Output a whole load of debugging information, including the current context and imported modules. Sample usage:: <pre> {% debug %} </pre> """ return DebugNode() @register.tag('filter') def do_filter(parser, token): """ Filter the contents of the block through variable filters. Filters can also be piped through each other, and they can have arguments -- just like in variable syntax. Sample usage:: {% filter force_escape|lower %} This text will be HTML-escaped, and will appear in lowercase. {% endfilter %} Note that the ``escape`` and ``safe`` filters are not acceptable arguments. Instead, use the ``autoescape`` tag to manage autoescaping for blocks of template code. """ # token.split_contents() isn't useful here because this tag doesn't accept variable as arguments _, rest = token.contents.split(None, 1) filter_expr = parser.compile_filter("var|%s" % (rest)) for func, unused in filter_expr.filters: filter_name = getattr(func, '_filter_name', None) if filter_name in ('escape', 'safe'): raise TemplateSyntaxError('"filter %s" is not permitted. Use the "autoescape" tag instead.' % filter_name) nodelist = parser.parse(('endfilter',)) parser.delete_first_token() return FilterNode(filter_expr, nodelist) @register.tag def firstof(parser, token): """ Output the first variable passed that is not False. Output nothing if all the passed variables are False. Sample usage:: {% firstof var1 var2 var3 as myvar %} This is equivalent to:: {% if var1 %} {{ var1 }} {% elif var2 %} {{ var2 }} {% elif var3 %} {{ var3 }} {% endif %} but much cleaner! You can also use a literal string as a fallback value in case all passed variables are False:: {% firstof var1 var2 var3 "fallback value" %} If you want to disable auto-escaping of variables you can use:: {% autoescape off %} {% firstof var1 var2 var3 "<strong>fallback value</strong>" %} {% autoescape %} Or if only some variables should be escaped, you can use:: {% firstof var1 var2|safe var3 "<strong>fallback value</strong>"|safe %} """ bits = token.split_contents()[1:] asvar = None if not bits: raise TemplateSyntaxError("'firstof' statement requires at least one argument") if len(bits) >= 2 and bits[-2] == 'as': asvar = bits[-1] bits = bits[:-2] return FirstOfNode([parser.compile_filter(bit) for bit in bits], asvar) @register.tag('for') def do_for(parser, token): """ Loop over each item in an array. For example, to display a list of athletes given ``athlete_list``:: <ul> {% for athlete in athlete_list %} <li>{{ athlete.name }}</li> {% endfor %} </ul> You can loop over a list in reverse by using ``{% for obj in list reversed %}``. 
You can also unpack multiple values from a two-dimensional array:: {% for key,value in dict.items %} {{ key }}: {{ value }} {% endfor %} The ``for`` tag can take an optional ``{% empty %}`` clause that will be displayed if the given array is empty or could not be found:: <ul> {% for athlete in athlete_list %} <li>{{ athlete.name }}</li> {% empty %} <li>Sorry, no athletes in this list.</li> {% endfor %} <ul> The above is equivalent to -- but shorter, cleaner, and possibly faster than -- the following:: <ul> {% if athlete_list %} {% for athlete in athlete_list %} <li>{{ athlete.name }}</li> {% endfor %} {% else %} <li>Sorry, no athletes in this list.</li> {% endif %} </ul> The for loop sets a number of variables available within the loop: ========================== ================================================ Variable Description ========================== ================================================ ``forloop.counter`` The current iteration of the loop (1-indexed) ``forloop.counter0`` The current iteration of the loop (0-indexed) ``forloop.revcounter`` The number of iterations from the end of the loop (1-indexed) ``forloop.revcounter0`` The number of iterations from the end of the loop (0-indexed) ``forloop.first`` True if this is the first time through the loop ``forloop.last`` True if this is the last time through the loop ``forloop.parentloop`` For nested loops, this is the loop "above" the current one ========================== ================================================ """ bits = token.split_contents() if len(bits) < 4: raise TemplateSyntaxError("'for' statements should have at least four" " words: %s" % token.contents) is_reversed = bits[-1] == 'reversed' in_index = -3 if is_reversed else -2 if bits[in_index] != 'in': raise TemplateSyntaxError("'for' statements should use the format" " 'for x in y': %s" % token.contents) invalid_chars = frozenset((' ', '"', "'", FILTER_SEPARATOR)) loopvars = re.split(r' *, *', ' '.join(bits[1:in_index])) for var in loopvars: if not var or not invalid_chars.isdisjoint(var): raise TemplateSyntaxError("'for' tag received an invalid argument:" " %s" % token.contents) sequence = parser.compile_filter(bits[in_index + 1]) nodelist_loop = parser.parse(('empty', 'endfor',)) token = parser.next_token() if token.contents == 'empty': nodelist_empty = parser.parse(('endfor',)) parser.delete_first_token() else: nodelist_empty = None return ForNode(loopvars, sequence, is_reversed, nodelist_loop, nodelist_empty) def do_ifequal(parser, token, negate): bits = list(token.split_contents()) if len(bits) != 3: raise TemplateSyntaxError("%r takes two arguments" % bits[0]) end_tag = 'end' + bits[0] nodelist_true = parser.parse(('else', end_tag)) token = parser.next_token() if token.contents == 'else': nodelist_false = parser.parse((end_tag,)) parser.delete_first_token() else: nodelist_false = NodeList() val1 = parser.compile_filter(bits[1]) val2 = parser.compile_filter(bits[2]) return IfEqualNode(val1, val2, nodelist_true, nodelist_false, negate) @register.tag def ifequal(parser, token): """ Output the contents of the block if the two arguments equal each other. Examples:: {% ifequal user.id comment.user_id %} ... {% endifequal %} {% ifnotequal user.id comment.user_id %} ... {% else %} ... {% endifnotequal %} """ return do_ifequal(parser, token, False) @register.tag def ifnotequal(parser, token): """ Output the contents of the block if the two arguments are not equal. See ifequal. 
""" return do_ifequal(parser, token, True) class TemplateLiteral(Literal): def __init__(self, value, text): self.value = value self.text = text # for better error messages def display(self): return self.text def eval(self, context): return self.value.resolve(context, ignore_failures=True) class TemplateIfParser(IfParser): error_class = TemplateSyntaxError def __init__(self, parser, *args, **kwargs): self.template_parser = parser super().__init__(*args, **kwargs) def create_var(self, value): return TemplateLiteral(self.template_parser.compile_filter(value), value) @register.tag('if') def do_if(parser, token): """ Evaluate a variable, and if that variable is "true" (i.e., exists, is not empty, and is not a false boolean value), output the contents of the block: :: {% if athlete_list %} Number of athletes: {{ athlete_list|count }} {% elif athlete_in_locker_room_list %} Athletes should be out of the locker room soon! {% else %} No athletes. {% endif %} In the above, if ``athlete_list`` is not empty, the number of athletes will be displayed by the ``{{ athlete_list|count }}`` variable. The ``if`` tag may take one or several `` {% elif %}`` clauses, as well as an ``{% else %}`` clause that will be displayed if all previous conditions fail. These clauses are optional. ``if`` tags may use ``or``, ``and`` or ``not`` to test a number of variables or to negate a given variable:: {% if not athlete_list %} There are no athletes. {% endif %} {% if athlete_list or coach_list %} There are some athletes or some coaches. {% endif %} {% if athlete_list and coach_list %} Both athletes and coaches are available. {% endif %} {% if not athlete_list or coach_list %} There are no athletes, or there are some coaches. {% endif %} {% if athlete_list and not coach_list %} There are some athletes and absolutely no coaches. {% endif %} Comparison operators are also available, and the use of filters is also allowed, for example:: {% if articles|length >= 5 %}...{% endif %} Arguments and operators _must_ have a space between them, so ``{% if 1>2 %}`` is not a valid if tag. All supported operators are: ``or``, ``and``, ``in``, ``not in`` ``==``, ``!=``, ``>``, ``>=``, ``<`` and ``<=``. Operator precedence follows Python. """ # {% if ... %} bits = token.split_contents()[1:] condition = TemplateIfParser(parser, bits).parse() nodelist = parser.parse(('elif', 'else', 'endif')) conditions_nodelists = [(condition, nodelist)] token = parser.next_token() # {% elif ... %} (repeatable) while token.contents.startswith('elif'): bits = token.split_contents()[1:] condition = TemplateIfParser(parser, bits).parse() nodelist = parser.parse(('elif', 'else', 'endif')) conditions_nodelists.append((condition, nodelist)) token = parser.next_token() # {% else %} (optional) if token.contents == 'else': nodelist = parser.parse(('endif',)) conditions_nodelists.append((None, nodelist)) token = parser.next_token() # {% endif %} if token.contents != 'endif': raise TemplateSyntaxError('Malformed template tag at line {}: "{}"'.format(token.lineno, token.contents)) return IfNode(conditions_nodelists) @register.tag def ifchanged(parser, token): """ Check if a value has changed from the last iteration of a loop. The ``{% ifchanged %}`` block tag is used within a loop. It has two possible uses. 1. Check its own rendered contents against its previous state and only displays the content if it has changed. 
For example, this displays a list of days, only displaying the month if it changes:: <h1>Archive for {{ year }}</h1> {% for date in days %} {% ifchanged %}<h3>{{ date|date:"F" }}</h3>{% endifchanged %} <a href="{{ date|date:"M/d"|lower }}/">{{ date|date:"j" }}</a> {% endfor %} 2. If given one or more variables, check whether any variable has changed. For example, the following shows the date every time it changes, while showing the hour if either the hour or the date has changed:: {% for date in days %} {% ifchanged date.date %} {{ date.date }} {% endifchanged %} {% ifchanged date.hour date.date %} {{ date.hour }} {% endifchanged %} {% endfor %} """ bits = token.split_contents() nodelist_true = parser.parse(('else', 'endifchanged')) token = parser.next_token() if token.contents == 'else': nodelist_false = parser.parse(('endifchanged',)) parser.delete_first_token() else: nodelist_false = NodeList() values = [parser.compile_filter(bit) for bit in bits[1:]] return IfChangedNode(nodelist_true, nodelist_false, *values) def find_library(parser, name): try: return parser.libraries[name] except KeyError: raise TemplateSyntaxError( "'%s' is not a registered tag library. Must be one of:\n%s" % ( name, "\n".join(sorted(parser.libraries)), ), ) def load_from_library(library, label, names): """ Return a subset of tags and filters from a library. """ subset = Library() for name in names: found = False if name in library.tags: found = True subset.tags[name] = library.tags[name] if name in library.filters: found = True subset.filters[name] = library.filters[name] if found is False: raise TemplateSyntaxError( "'%s' is not a valid tag or filter in tag library '%s'" % ( name, label, ), ) return subset @register.tag def load(parser, token): """ Load a custom template tag library into the parser. For example, to load the template tags in ``django/templatetags/news/photos.py``:: {% load news.photos %} Can also be used to load an individual tag/filter from a library:: {% load byline from news %} """ # token.split_contents() isn't useful here because this tag doesn't accept variable as arguments bits = token.contents.split() if len(bits) >= 4 and bits[-2] == "from": # from syntax is used; load individual tags from the library name = bits[-1] lib = find_library(parser, name) subset = load_from_library(lib, name, bits[1:-2]) parser.add_library(subset) else: # one or more libraries are specified; load and add them to the parser for name in bits[1:]: lib = find_library(parser, name) parser.add_library(lib) return LoadNode() @register.tag def lorem(parser, token): """ Create random Latin text useful for providing test data in templates. Usage format:: {% lorem [count] [method] [random] %} ``count`` is a number (or variable) containing the number of paragraphs or words to generate (default is 1). ``method`` is either ``w`` for words, ``p`` for HTML paragraphs, ``b`` for plain-text paragraph blocks (default is ``b``). ``random`` is the word ``random``, which if given, does not use the common paragraph (starting "Lorem ipsum dolor sit amet, consectetuer..."). 
Examples: * ``{% lorem %}`` outputs the common "lorem ipsum" paragraph * ``{% lorem 3 p %}`` outputs the common "lorem ipsum" paragraph and two random paragraphs each wrapped in HTML ``<p>`` tags * ``{% lorem 2 w random %}`` outputs two random latin words """ bits = list(token.split_contents()) tagname = bits[0] # Random bit common = bits[-1] != 'random' if not common: bits.pop() # Method bit if bits[-1] in ('w', 'p', 'b'): method = bits.pop() else: method = 'b' # Count bit if len(bits) > 1: count = bits.pop() else: count = '1' count = parser.compile_filter(count) if len(bits) != 1: raise TemplateSyntaxError("Incorrect format for %r tag" % tagname) return LoremNode(count, method, common) @register.tag def now(parser, token): """ Display the date, formatted according to the given string. Use the same format as PHP's ``date()`` function; see https://php.net/date for all the possible values. Sample usage:: It is {% now "jS F Y H:i" %} """ bits = token.split_contents() asvar = None if len(bits) == 4 and bits[-2] == 'as': asvar = bits[-1] bits = bits[:-2] if len(bits) != 2: raise TemplateSyntaxError("'now' statement takes one argument") format_string = bits[1][1:-1] return NowNode(format_string, asvar) @register.tag def regroup(parser, token): """ Regroup a list of alike objects by a common attribute. This complex tag is best illustrated by use of an example: say that ``musicians`` is a list of ``Musician`` objects that have ``name`` and ``instrument`` attributes, and you'd like to display a list that looks like: * Guitar: * Django Reinhardt * Emily Remler * Piano: * Lovie Austin * Bud Powell * Trumpet: * Duke Ellington The following snippet of template code would accomplish this dubious task:: {% regroup musicians by instrument as grouped %} <ul> {% for group in grouped %} <li>{{ group.grouper }} <ul> {% for musician in group.list %} <li>{{ musician.name }}</li> {% endfor %} </ul> {% endfor %} </ul> As you can see, ``{% regroup %}`` populates a variable with a list of objects with ``grouper`` and ``list`` attributes. ``grouper`` contains the item that was grouped by; ``list`` contains the list of objects that share that ``grouper``. In this case, ``grouper`` would be ``Guitar``, ``Piano`` and ``Trumpet``, and ``list`` is the list of musicians who play this instrument. Note that ``{% regroup %}`` does not work when the list to be grouped is not sorted by the key you are grouping by! This means that if your list of musicians was not sorted by instrument, you'd need to make sure it is sorted before using it, i.e.:: {% regroup musicians|dictsort:"instrument" by instrument as grouped %} """ bits = token.split_contents() if len(bits) != 6: raise TemplateSyntaxError("'regroup' tag takes five arguments") target = parser.compile_filter(bits[1]) if bits[2] != 'by': raise TemplateSyntaxError("second argument to 'regroup' tag must be 'by'") if bits[4] != 'as': raise TemplateSyntaxError("next-to-last argument to 'regroup' tag must" " be 'as'") var_name = bits[5] # RegroupNode will take each item in 'target', put it in the context under # 'var_name', evaluate 'var_name'.'expression' in the current context, and # group by the resulting value. After all items are processed, it will # save the final result in the context under 'var_name', thus clearing the # temporary values. This hack is necessary because the template engine # doesn't provide a context-aware equivalent of Python's getattr. 
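    # For {% regroup musicians by instrument as grouped %} this compiles the
    # lookup "grouped.instrument", resolved per-object by RegroupNode.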
expression = parser.compile_filter(var_name + VARIABLE_ATTRIBUTE_SEPARATOR + bits[3]) return RegroupNode(target, expression, var_name) @register.tag def resetcycle(parser, token): """ Reset a cycle tag. If an argument is given, reset the last rendered cycle tag whose name matches the argument, else reset the last rendered cycle tag (named or unnamed). """ args = token.split_contents() if len(args) > 2: raise TemplateSyntaxError("%r tag accepts at most one argument." % args[0]) if len(args) == 2: name = args[1] try: return ResetCycleNode(parser._named_cycle_nodes[name]) except (AttributeError, KeyError): raise TemplateSyntaxError("Named cycle '%s' does not exist." % name) try: return ResetCycleNode(parser._last_cycle_node) except AttributeError: raise TemplateSyntaxError("No cycles in template.") @register.tag def spaceless(parser, token): """ Remove whitespace between HTML tags, including tab and newline characters. Example usage:: {% spaceless %} <p> <a href="foo/">Foo</a> </p> {% endspaceless %} This example returns this HTML:: <p><a href="foo/">Foo</a></p> Only space between *tags* is normalized -- not space between tags and text. In this example, the space around ``Hello`` isn't stripped:: {% spaceless %} <strong> Hello </strong> {% endspaceless %} """ nodelist = parser.parse(('endspaceless',)) parser.delete_first_token() return SpacelessNode(nodelist) @register.tag def templatetag(parser, token): """ Output one of the bits used to compose template tags. Since the template system has no concept of "escaping", to display one of the bits used in template tags, you must use the ``{% templatetag %}`` tag. The argument tells which template bit to output: ================== ======= Argument Outputs ================== ======= ``openblock`` ``{%`` ``closeblock`` ``%}`` ``openvariable`` ``{{`` ``closevariable`` ``}}`` ``openbrace`` ``{`` ``closebrace`` ``}`` ``opencomment`` ``{#`` ``closecomment`` ``#}`` ================== ======= """ # token.split_contents() isn't useful here because this tag doesn't accept variable as arguments bits = token.contents.split() if len(bits) != 2: raise TemplateSyntaxError("'templatetag' statement takes one argument") tag = bits[1] if tag not in TemplateTagNode.mapping: raise TemplateSyntaxError("Invalid templatetag argument: '%s'." " Must be one of: %s" % (tag, list(TemplateTagNode.mapping))) return TemplateTagNode(tag) @register.tag def url(parser, token): r""" Return an absolute URL matching the given view with its parameters. This is a way to define links that aren't tied to a particular URL configuration:: {% url "url_name" arg1 arg2 %} or {% url "url_name" name1=value1 name2=value2 %} The first argument is a URL pattern name. Other arguments are space-separated values that will be filled in place of positional and keyword arguments in the URL. Don't mix positional and keyword arguments. All arguments for the URL must be present. For example, if you have a view ``app_name.views.client_details`` taking the client's id and the corresponding line in a URLconf looks like this:: path('client/<int:id>/', views.client_details, name='client-detail-view') and this app's URLconf is included into the project's URLconf under some path:: path('clients/', include('app_name.urls')) then in a template you can create a link for a certain client like this:: {% url "client-detail-view" client.id %} The URL will look like ``/clients/client/123/``. 
The first argument may also be the name of a template variable that will be evaluated to obtain the view name or the URL name, e.g.:: {% with url_name="client-detail-view" %} {% url url_name client.id %} {% endwith %} """ bits = token.split_contents() if len(bits) < 2: raise TemplateSyntaxError("'%s' takes at least one argument, a URL pattern name." % bits[0]) viewname = parser.compile_filter(bits[1]) args = [] kwargs = {} asvar = None bits = bits[2:] if len(bits) >= 2 and bits[-2] == 'as': asvar = bits[-1] bits = bits[:-2] for bit in bits: match = kwarg_re.match(bit) if not match: raise TemplateSyntaxError("Malformed arguments to url tag") name, value = match.groups() if name: kwargs[name] = parser.compile_filter(value) else: args.append(parser.compile_filter(value)) return URLNode(viewname, args, kwargs, asvar) @register.tag def verbatim(parser, token): """ Stop the template engine from rendering the contents of this block tag. Usage:: {% verbatim %} {% don't process this %} {% endverbatim %} You can also designate a specific closing tag block (allowing the unrendered use of ``{% endverbatim %}``):: {% verbatim myblock %} ... {% endverbatim myblock %} """ nodelist = parser.parse(('endverbatim',)) parser.delete_first_token() return VerbatimNode(nodelist.render(Context())) @register.tag def widthratio(parser, token): """ For creating bar charts and such. Calculate the ratio of a given value to a maximum value, and then apply that ratio to a constant. For example:: <img src="bar.png" alt="Bar" height="10" width="{% widthratio this_value max_value max_width %}"> If ``this_value`` is 175, ``max_value`` is 200, and ``max_width`` is 100, the image in the above example will be 88 pixels wide (because 175/200 = .875; .875 * 100 = 87.5 which is rounded up to 88). In some cases you might want to capture the result of widthratio in a variable. It can be useful for instance in a blocktranslate like this:: {% widthratio this_value max_value max_width as width %} {% blocktranslate %}The width is: {{ width }}{% endblocktranslate %} """ bits = token.split_contents() if len(bits) == 4: tag, this_value_expr, max_value_expr, max_width = bits asvar = None elif len(bits) == 6: tag, this_value_expr, max_value_expr, max_width, as_, asvar = bits if as_ != 'as': raise TemplateSyntaxError("Invalid syntax in widthratio tag. Expecting 'as' keyword") else: raise TemplateSyntaxError("widthratio takes at least three arguments") return WidthRatioNode(parser.compile_filter(this_value_expr), parser.compile_filter(max_value_expr), parser.compile_filter(max_width), asvar=asvar) @register.tag('with') def do_with(parser, token): """ Add one or more values to the context (inside of this block) for caching and easy access. For example:: {% with total=person.some_sql_method %} {{ total }} object{{ total|pluralize }} {% endwith %} Multiple values can be added to the context:: {% with foo=1 bar=2 %} ... {% endwith %} The legacy format of ``{% with person.some_sql_method as total %}`` is still accepted. """ bits = token.split_contents() remaining_bits = bits[1:] extra_context = token_kwargs(remaining_bits, parser, support_legacy=True) if not extra_context: raise TemplateSyntaxError("%r expected at least one variable " "assignment" % bits[0]) if remaining_bits: raise TemplateSyntaxError("%r received an invalid token: %r" % (bits[0], remaining_bits[0])) nodelist = parser.parse(('endwith',)) parser.delete_first_token() return WithNode(None, None, nodelist, extra_context=extra_context)
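

# Illustrative only (the helper name and template string are hypothetical):
# a self-contained sketch of a few of the tags above, rendered through a
# standalone Engine:
def _defaulttags_example():
    from django.template import Context, Engine
    engine = Engine()
    template = engine.from_string(
        '{% with total=items|length %}'
        '{% for item in items %}{% cycle "odd" "even" %}:{{ item }} {% endfor %}'
        '({{ total }} item{{ total|pluralize }})'
        '{% endwith %}'
    )
    # -> "odd:a even:b odd:c (3 items)"
    return template.render(Context({'items': ['a', 'b', 'c']}))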
"""Default variable filters.""" import random as random_module import re import types from decimal import ROUND_HALF_UP, Context, Decimal, InvalidOperation from functools import wraps from operator import itemgetter from pprint import pformat from urllib.parse import quote from django.utils import formats from django.utils.dateformat import format, time_format from django.utils.encoding import iri_to_uri from django.utils.html import ( avoid_wrapping, conditional_escape, escape, escapejs, json_script as _json_script, linebreaks, strip_tags, urlize as _urlize, ) from django.utils.safestring import SafeData, mark_safe from django.utils.text import ( Truncator, normalize_newlines, phone2numeric, slugify as _slugify, wrap, ) from django.utils.timesince import timesince, timeuntil from django.utils.translation import gettext, ngettext from .base import Variable, VariableDoesNotExist from .library import Library register = Library() ####################### # STRING DECORATOR # ####################### def stringfilter(func): """ Decorator for filters which should only receive strings. The object passed as the first positional argument will be converted to a string. """ def _dec(*args, **kwargs): args = list(args) args[0] = str(args[0]) if (isinstance(args[0], SafeData) and getattr(_dec._decorated_function, 'is_safe', False)): return mark_safe(func(*args, **kwargs)) return func(*args, **kwargs) # Include a reference to the real function (used to check original # arguments by the template parser, and to bear the 'is_safe' attribute # when multiple decorators are applied). _dec._decorated_function = getattr(func, '_decorated_function', func) return wraps(func)(_dec) ################### # STRINGS # ################### @register.filter(is_safe=True) @stringfilter def addslashes(value): """ Add slashes before quotes. Useful for escaping strings in CSV, for example. Less useful for escaping JavaScript; use the ``escapejs`` filter instead. """ return value.replace('\\', '\\\\').replace('"', '\\"').replace("'", "\\'") @register.filter(is_safe=True) @stringfilter def capfirst(value): """Capitalize the first character of the value.""" return value and value[0].upper() + value[1:] @register.filter("escapejs") @stringfilter def escapejs_filter(value): """Hex encode characters for use in JavaScript strings.""" return escapejs(value) @register.filter(is_safe=True) def json_script(value, element_id): """ Output value JSON-encoded, wrapped in a <script type="application/json"> tag. """ return _json_script(value, element_id) @register.filter(is_safe=True) def floatformat(text, arg=-1): """ Display a float to a specified number of decimal places. If called without an argument, display the floating point number with one decimal place -- but only if there's a decimal place to be displayed: * num1 = 34.23234 * num2 = 34.00000 * num3 = 34.26000 * {{ num1|floatformat }} displays "34.2" * {{ num2|floatformat }} displays "34" * {{ num3|floatformat }} displays "34.3" If arg is positive, always display exactly arg number of decimal places: * {{ num1|floatformat:3 }} displays "34.232" * {{ num2|floatformat:3 }} displays "34.000" * {{ num3|floatformat:3 }} displays "34.260" If arg is negative, display arg number of decimal places -- but only if there are places to be displayed: * {{ num1|floatformat:"-3" }} displays "34.232" * {{ num2|floatformat:"-3" }} displays "34" * {{ num3|floatformat:"-3" }} displays "34.260" If the input float is infinity or NaN, display the string representation of that value. 
""" try: input_val = repr(text) d = Decimal(input_val) except InvalidOperation: try: d = Decimal(str(float(text))) except (ValueError, InvalidOperation, TypeError): return '' try: p = int(arg) except ValueError: return input_val try: m = int(d) - d except (ValueError, OverflowError, InvalidOperation): return input_val if not m and p < 0: return mark_safe(formats.number_format('%d' % (int(d)), 0)) exp = Decimal(1).scaleb(-abs(p)) # Set the precision high enough to avoid an exception (#15789). tupl = d.as_tuple() units = len(tupl[1]) units += -tupl[2] if m else tupl[2] prec = abs(p) + units + 1 # Avoid conversion to scientific notation by accessing `sign`, `digits`, # and `exponent` from Decimal.as_tuple() directly. rounded_d = d.quantize(exp, ROUND_HALF_UP, Context(prec=prec)) sign, digits, exponent = rounded_d.as_tuple() digits = [str(digit) for digit in reversed(digits)] while len(digits) <= abs(exponent): digits.append('0') digits.insert(-exponent, '.') if sign and rounded_d: digits.append('-') number = ''.join(reversed(digits)) return mark_safe(formats.number_format(number, abs(p))) @register.filter(is_safe=True) @stringfilter def iriencode(value): """Escape an IRI value for use in a URL.""" return iri_to_uri(value) @register.filter(is_safe=True, needs_autoescape=True) @stringfilter def linenumbers(value, autoescape=True): """Display text with line numbers.""" lines = value.split('\n') # Find the maximum width of the line count, for use with zero padding # string format command width = str(len(str(len(lines)))) if not autoescape or isinstance(value, SafeData): for i, line in enumerate(lines): lines[i] = ("%0" + width + "d. %s") % (i + 1, line) else: for i, line in enumerate(lines): lines[i] = ("%0" + width + "d. %s") % (i + 1, escape(line)) return mark_safe('\n'.join(lines)) @register.filter(is_safe=True) @stringfilter def lower(value): """Convert a string into all lowercase.""" return value.lower() @register.filter(is_safe=False) @stringfilter def make_list(value): """ Return the value turned into a list. For an integer, it's a list of digits. For a string, it's a list of characters. """ return list(value) @register.filter(is_safe=True) @stringfilter def slugify(value): """ Convert to ASCII. Convert spaces to hyphens. Remove characters that aren't alphanumerics, underscores, or hyphens. Convert to lowercase. Also strip leading and trailing whitespace. """ return _slugify(value) @register.filter(is_safe=True) def stringformat(value, arg): """ Format the variable according to the arg, a string formatting specifier. This specifier uses Python string formatting syntax, with the exception that the leading "%" is dropped. See https://docs.python.org/library/stdtypes.html#printf-style-string-formatting for documentation of Python string formatting. """ if isinstance(value, tuple): value = str(value) try: return ("%" + str(arg)) % value except (ValueError, TypeError): return "" @register.filter(is_safe=True) @stringfilter def title(value): """Convert a string into titlecase.""" t = re.sub("([a-z])'([A-Z])", lambda m: m.group(0).lower(), value.title()) return re.sub(r"\d([A-Z])", lambda m: m.group(0).lower(), t) @register.filter(is_safe=True) @stringfilter def truncatechars(value, arg): """Truncate a string after `arg` number of characters.""" try: length = int(arg) except ValueError: # Invalid literal for int(). return value # Fail silently. 
return Truncator(value).chars(length) @register.filter(is_safe=True) @stringfilter def truncatechars_html(value, arg): """ Truncate HTML after `arg` number of chars. Preserve newlines in the HTML. """ try: length = int(arg) except ValueError: # invalid literal for int() return value # Fail silently. return Truncator(value).chars(length, html=True) @register.filter(is_safe=True) @stringfilter def truncatewords(value, arg): """ Truncate a string after `arg` number of words. Remove newlines within the string. """ try: length = int(arg) except ValueError: # Invalid literal for int(). return value # Fail silently. return Truncator(value).words(length, truncate=' …') @register.filter(is_safe=True) @stringfilter def truncatewords_html(value, arg): """ Truncate HTML after `arg` number of words. Preserve newlines in the HTML. """ try: length = int(arg) except ValueError: # invalid literal for int() return value # Fail silently. return Truncator(value).words(length, html=True, truncate=' …') @register.filter(is_safe=False) @stringfilter def upper(value): """Convert a string into all uppercase.""" return value.upper() @register.filter(is_safe=False) @stringfilter def urlencode(value, safe=None): """ Escape a value for use in a URL. The ``safe`` parameter determines the characters which should not be escaped by Python's quote() function. If not provided, use the default safe characters (but an empty string can be provided when *all* characters should be escaped). """ kwargs = {} if safe is not None: kwargs['safe'] = safe return quote(value, **kwargs) @register.filter(is_safe=True, needs_autoescape=True) @stringfilter def urlize(value, autoescape=True): """Convert URLs in plain text into clickable links.""" return mark_safe(_urlize(value, nofollow=True, autoescape=autoescape)) @register.filter(is_safe=True, needs_autoescape=True) @stringfilter def urlizetrunc(value, limit, autoescape=True): """ Convert URLs into clickable links, truncating URLs to the given character limit, and adding 'rel=nofollow' attribute to discourage spamming. Argument: Length to truncate URLs to. """ return mark_safe(_urlize(value, trim_url_limit=int(limit), nofollow=True, autoescape=autoescape)) @register.filter(is_safe=False) @stringfilter def wordcount(value): """Return the number of words.""" return len(value.split()) @register.filter(is_safe=True) @stringfilter def wordwrap(value, arg): """Wrap words at `arg` line length.""" return wrap(value, int(arg)) @register.filter(is_safe=True) @stringfilter def ljust(value, arg): """Left-align the value in a field of a given width.""" return value.ljust(int(arg)) @register.filter(is_safe=True) @stringfilter def rjust(value, arg): """Right-align the value in a field of a given width.""" return value.rjust(int(arg)) @register.filter(is_safe=True) @stringfilter def center(value, arg): """Center the value in a field of a given width.""" return value.center(int(arg)) @register.filter @stringfilter def cut(value, arg): """Remove all values of arg from the given string.""" safe = isinstance(value, SafeData) value = value.replace(arg, '') if safe and arg != ';': return mark_safe(value) return value ################### # HTML STRINGS # ################### @register.filter("escape", is_safe=True) @stringfilter def escape_filter(value): """Mark the value as a string that should be auto-escaped.""" return conditional_escape(value) @register.filter(is_safe=True) @stringfilter def force_escape(value): """ Escape a string's HTML. 
Return a new string containing the escaped characters (as opposed to "escape", which marks the content for later possible escaping). """ return escape(value) @register.filter("linebreaks", is_safe=True, needs_autoescape=True) @stringfilter def linebreaks_filter(value, autoescape=True): """ Replace line breaks in plain text with appropriate HTML; a single newline becomes an HTML line break (``<br>``) and a new line followed by a blank line becomes a paragraph break (``</p>``). """ autoescape = autoescape and not isinstance(value, SafeData) return mark_safe(linebreaks(value, autoescape)) @register.filter(is_safe=True, needs_autoescape=True) @stringfilter def linebreaksbr(value, autoescape=True): """ Convert all newlines in a piece of plain text to HTML line breaks (``<br>``). """ autoescape = autoescape and not isinstance(value, SafeData) value = normalize_newlines(value) if autoescape: value = escape(value) return mark_safe(value.replace('\n', '<br>')) @register.filter(is_safe=True) @stringfilter def safe(value): """Mark the value as a string that should not be auto-escaped.""" return mark_safe(value) @register.filter(is_safe=True) def safeseq(value): """ A "safe" filter for sequences. Mark each element in the sequence, individually, as safe, after converting them to strings. Return a list with the results. """ return [mark_safe(obj) for obj in value] @register.filter(is_safe=True) @stringfilter def striptags(value): """Strip all [X]HTML tags.""" return strip_tags(value) ################### # LISTS # ################### def _property_resolver(arg): """ When arg is convertible to float, behave like operator.itemgetter(arg) Otherwise, behave like Variable(arg).resolve >>> _property_resolver(1)('abc') 'b' >>> _property_resolver('1')('abc') Traceback (most recent call last): ... TypeError: string indices must be integers >>> class Foo: ... a = 42 ... b = 3.14 ... c = 'Hey!' >>> _property_resolver('b')(Foo()) 3.14 """ try: float(arg) except ValueError: return Variable(arg).resolve else: return itemgetter(arg) @register.filter(is_safe=False) def dictsort(value, arg): """ Given a list of dicts, return that list sorted by the property given in the argument. """ try: return sorted(value, key=_property_resolver(arg)) except (TypeError, VariableDoesNotExist): return '' @register.filter(is_safe=False) def dictsortreversed(value, arg): """ Given a list of dicts, return that list sorted in reverse order by the property given in the argument. """ try: return sorted(value, key=_property_resolver(arg), reverse=True) except (TypeError, VariableDoesNotExist): return '' @register.filter(is_safe=False) def first(value): """Return the first item in a list.""" try: return value[0] except IndexError: return '' @register.filter(is_safe=True, needs_autoescape=True) def join(value, arg, autoescape=True): """Join a list with a string, like Python's ``str.join(list)``.""" try: if autoescape: value = [conditional_escape(v) for v in value] data = conditional_escape(arg).join(value) except TypeError: # Fail silently if arg isn't iterable. 
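        # str.join() raises TypeError for non-string items (e.g. a list of
        # ints), in which case the original value is returned unchanged.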
return value return mark_safe(data) @register.filter(is_safe=True) def last(value): """Return the last item in a list.""" try: return value[-1] except IndexError: return '' @register.filter(is_safe=False) def length(value): """Return the length of the value - useful for lists.""" try: return len(value) except (ValueError, TypeError): return 0 @register.filter(is_safe=False) def length_is(value, arg): """Return a boolean of whether the value's length is the argument.""" try: return len(value) == int(arg) except (ValueError, TypeError): return '' @register.filter(is_safe=True) def random(value): """Return a random item from the list.""" return random_module.choice(value) @register.filter("slice", is_safe=True) def slice_filter(value, arg): """ Return a slice of the list using the same syntax as Python's list slicing. """ try: bits = [] for x in str(arg).split(':'): if not x: bits.append(None) else: bits.append(int(x)) return value[slice(*bits)] except (ValueError, TypeError): return value # Fail silently. @register.filter(is_safe=True, needs_autoescape=True) def unordered_list(value, autoescape=True): """ Recursively take a self-nested list and return an HTML unordered list -- WITHOUT opening and closing <ul> tags. Assume the list is in the proper format. For example, if ``var`` contains: ``['States', ['Kansas', ['Lawrence', 'Topeka'], 'Illinois']]``, then ``{{ var|unordered_list }}`` returns:: <li>States <ul> <li>Kansas <ul> <li>Lawrence</li> <li>Topeka</li> </ul> </li> <li>Illinois</li> </ul> </li> """ if autoescape: escaper = conditional_escape else: def escaper(x): return x def walk_items(item_list): item_iterator = iter(item_list) try: item = next(item_iterator) while True: try: next_item = next(item_iterator) except StopIteration: yield item, None break if isinstance(next_item, (list, tuple, types.GeneratorType)): try: iter(next_item) except TypeError: pass else: yield item, next_item item = next(item_iterator) continue yield item, None item = next_item except StopIteration: pass def list_formatter(item_list, tabs=1): indent = '\t' * tabs output = [] for item, children in walk_items(item_list): sublist = '' if children: sublist = '\n%s<ul>\n%s\n%s</ul>\n%s' % ( indent, list_formatter(children, tabs + 1), indent, indent) output.append('%s<li>%s%s</li>' % ( indent, escaper(item), sublist)) return '\n'.join(output) return mark_safe(list_formatter(value)) ################### # INTEGERS # ################### @register.filter(is_safe=False) def add(value, arg): """Add the arg to the value.""" try: return int(value) + int(arg) except (ValueError, TypeError): try: return value + arg except Exception: return '' @register.filter(is_safe=False) def get_digit(value, arg): """ Given a whole number, return the requested digit of it, where 1 is the right-most digit, 2 is the second-right-most digit, etc. Return the original value for invalid input (if input or argument is not an integer, or if argument is less than 1). Otherwise, output is always an integer. 
""" try: arg = int(arg) value = int(value) except ValueError: return value # Fail silently for an invalid argument if arg < 1: return value try: return int(str(value)[-arg]) except IndexError: return 0 ################### # DATES # ################### @register.filter(expects_localtime=True, is_safe=False) def date(value, arg=None): """Format a date according to the given format.""" if value in (None, ''): return '' try: return formats.date_format(value, arg) except AttributeError: try: return format(value, arg) except AttributeError: return '' @register.filter(expects_localtime=True, is_safe=False) def time(value, arg=None): """Format a time according to the given format.""" if value in (None, ''): return '' try: return formats.time_format(value, arg) except (AttributeError, TypeError): try: return time_format(value, arg) except (AttributeError, TypeError): return '' @register.filter("timesince", is_safe=False) def timesince_filter(value, arg=None): """Format a date as the time since that date (i.e. "4 days, 6 hours").""" if not value: return '' try: if arg: return timesince(value, arg) return timesince(value) except (ValueError, TypeError): return '' @register.filter("timeuntil", is_safe=False) def timeuntil_filter(value, arg=None): """Format a date as the time until that date (i.e. "4 days, 6 hours").""" if not value: return '' try: return timeuntil(value, arg) except (ValueError, TypeError): return '' ################### # LOGIC # ################### @register.filter(is_safe=False) def default(value, arg): """If value is unavailable, use given default.""" return value or arg @register.filter(is_safe=False) def default_if_none(value, arg): """If value is None, use given default.""" if value is None: return arg return value @register.filter(is_safe=False) def divisibleby(value, arg): """Return True if the value is divisible by the argument.""" return int(value) % int(arg) == 0 @register.filter(is_safe=False) def yesno(value, arg=None): """ Given a string mapping values for true, false, and (optionally) None, return one of those strings according to the value: ========== ====================== ================================== Value Argument Outputs ========== ====================== ================================== ``True`` ``"yeah,no,maybe"`` ``yeah`` ``False`` ``"yeah,no,maybe"`` ``no`` ``None`` ``"yeah,no,maybe"`` ``maybe`` ``None`` ``"yeah,no"`` ``"no"`` (converts None to False if no mapping for None is given. ========== ====================== ================================== """ if arg is None: # Translators: Please do not add spaces around commas. arg = gettext('yes,no,maybe') bits = arg.split(',') if len(bits) < 2: return value # Invalid arg. try: yes, no, maybe = bits except ValueError: # Unpack list of wrong size (no "maybe" value provided). yes, no, maybe = bits[0], bits[1], bits[1] if value is None: return maybe if value: return yes return no ################### # MISC # ################### @register.filter(is_safe=True) def filesizeformat(bytes_): """ Format the value like a 'human-readable' file size (i.e. 13 KB, 4.1 MB, 102 bytes, etc.). """ try: bytes_ = int(bytes_) except (TypeError, ValueError, UnicodeDecodeError): value = ngettext("%(size)d byte", "%(size)d bytes", 0) % {'size': 0} return avoid_wrapping(value) def filesize_number_format(value): return formats.number_format(round(value, 1), 1) KB = 1 << 10 MB = 1 << 20 GB = 1 << 30 TB = 1 << 40 PB = 1 << 50 negative = bytes_ < 0 if negative: bytes_ = -bytes_ # Allow formatting of negative numbers. 
if bytes_ < KB: value = ngettext("%(size)d byte", "%(size)d bytes", bytes_) % {'size': bytes_} elif bytes_ < MB: value = gettext("%s KB") % filesize_number_format(bytes_ / KB) elif bytes_ < GB: value = gettext("%s MB") % filesize_number_format(bytes_ / MB) elif bytes_ < TB: value = gettext("%s GB") % filesize_number_format(bytes_ / GB) elif bytes_ < PB: value = gettext("%s TB") % filesize_number_format(bytes_ / TB) else: value = gettext("%s PB") % filesize_number_format(bytes_ / PB) if negative: value = "-%s" % value return avoid_wrapping(value) @register.filter(is_safe=False) def pluralize(value, arg='s'): """ Return a plural suffix if the value is not 1, '1', or an object of length 1. By default, use 's' as the suffix: * If value is 0, vote{{ value|pluralize }} display "votes". * If value is 1, vote{{ value|pluralize }} display "vote". * If value is 2, vote{{ value|pluralize }} display "votes". If an argument is provided, use that string instead: * If value is 0, class{{ value|pluralize:"es" }} display "classes". * If value is 1, class{{ value|pluralize:"es" }} display "class". * If value is 2, class{{ value|pluralize:"es" }} display "classes". If the provided argument contains a comma, use the text before the comma for the singular case and the text after the comma for the plural case: * If value is 0, cand{{ value|pluralize:"y,ies" }} display "candies". * If value is 1, cand{{ value|pluralize:"y,ies" }} display "candy". * If value is 2, cand{{ value|pluralize:"y,ies" }} display "candies". """ if ',' not in arg: arg = ',' + arg bits = arg.split(',') if len(bits) > 2: return '' singular_suffix, plural_suffix = bits[:2] try: return singular_suffix if float(value) == 1 else plural_suffix except ValueError: # Invalid string that's not a number. pass except TypeError: # Value isn't a string or a number; maybe it's a list? try: return singular_suffix if len(value) == 1 else plural_suffix except TypeError: # len() of unsized object. pass return '' @register.filter("phone2numeric", is_safe=True) def phone2numeric_filter(value): """Take a phone number and converts it in to its numerical equivalent.""" return phone2numeric(value) @register.filter(is_safe=True) def pprint(value): """A wrapper around pprint.pprint -- for debugging, really.""" try: return pformat(value) except Exception as e: return "Error in formatting: %s: %s" % (e.__class__.__name__, e)
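
# A minimal illustrative sketch, not part of Django's source: the filters
# above are plain callables, so they can be exercised directly from Python.
# The helper name below is hypothetical; it sticks to filters that don't
# touch settings or translation machinery.
def _demo_default_filters():
    from django.template.defaultfilters import cut, truncatewords, wordcount
    # truncatewords() keeps the first 3 words and appends the ' …' suffix.
    assert truncatewords('The quick brown fox jumped', 3) == 'The quick brown …'
    # wordcount() simply splits on whitespace.
    assert wordcount('The quick brown fox') == 4
    # cut() removes every occurrence of its argument.
    assert cut('String with spaces', ' ') == 'Stringwithspaces'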
import re from django.conf import settings from django.http import HttpResponsePermanentRedirect from django.utils.deprecation import MiddlewareMixin class SecurityMiddleware(MiddlewareMixin): # RemovedInDjango40Warning: when the deprecation ends, replace with: # def __init__(self, get_response): def __init__(self, get_response=None): self._get_response_none_deprecation(get_response) self.sts_seconds = settings.SECURE_HSTS_SECONDS self.sts_include_subdomains = settings.SECURE_HSTS_INCLUDE_SUBDOMAINS self.sts_preload = settings.SECURE_HSTS_PRELOAD self.content_type_nosniff = settings.SECURE_CONTENT_TYPE_NOSNIFF self.xss_filter = settings.SECURE_BROWSER_XSS_FILTER self.redirect = settings.SECURE_SSL_REDIRECT self.redirect_host = settings.SECURE_SSL_HOST self.redirect_exempt = [re.compile(r) for r in settings.SECURE_REDIRECT_EXEMPT] self.referrer_policy = settings.SECURE_REFERRER_POLICY self.get_response = get_response def process_request(self, request): path = request.path.lstrip("/") if (self.redirect and not request.is_secure() and not any(pattern.search(path) for pattern in self.redirect_exempt)): host = self.redirect_host or request.get_host() return HttpResponsePermanentRedirect( "https://%s%s" % (host, request.get_full_path()) ) def process_response(self, request, response): if (self.sts_seconds and request.is_secure() and 'Strict-Transport-Security' not in response): sts_header = "max-age=%s" % self.sts_seconds if self.sts_include_subdomains: sts_header = sts_header + "; includeSubDomains" if self.sts_preload: sts_header = sts_header + "; preload" response['Strict-Transport-Security'] = sts_header if self.content_type_nosniff: response.setdefault('X-Content-Type-Options', 'nosniff') if self.xss_filter: response.setdefault('X-XSS-Protection', '1; mode=block') if self.referrer_policy: # Support a comma-separated string or iterable of values to allow # fallback. response.setdefault('Referrer-Policy', ','.join( [v.strip() for v in self.referrer_policy.split(',')] if isinstance(self.referrer_policy, str) else self.referrer_policy )) return response
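
# A minimal illustrative sketch, not part of Django's source: exercising the
# HSTS branch of SecurityMiddleware.process_response() above. The helper name
# is hypothetical, and the settings.configure() call is only for a standalone
# demo; a real project sets SECURE_HSTS_* in its settings module.
def _demo_hsts_header():
    from django.conf import settings
    if not settings.configured:
        settings.configure(
            SECURE_HSTS_SECONDS=3600,
            SECURE_HSTS_INCLUDE_SUBDOMAINS=True,
            SECURE_HSTS_PRELOAD=True,
        )
    from django.http import HttpResponse
    from django.test import RequestFactory
    middleware = SecurityMiddleware(lambda request: HttpResponse())
    # The request must be secure, or the Strict-Transport-Security branch
    # above is skipped entirely.
    request = RequestFactory().get('/', secure=True)
    response = middleware.process_response(request, HttpResponse())
    assert response['Strict-Transport-Security'] == 'max-age=3600; includeSubDomains; preload'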
from django.conf import settings from django.conf.urls.i18n import is_language_prefix_patterns_used from django.http import HttpResponseRedirect from django.urls import get_script_prefix, is_valid_path from django.utils import translation from django.utils.cache import patch_vary_headers from django.utils.deprecation import MiddlewareMixin class LocaleMiddleware(MiddlewareMixin): """ Parse a request and decide what translation object to install in the current thread context. This allows pages to be dynamically translated to the language the user desires (if the language is available). """ response_redirect_class = HttpResponseRedirect def process_request(self, request): urlconf = getattr(request, 'urlconf', settings.ROOT_URLCONF) i18n_patterns_used, prefixed_default_language = is_language_prefix_patterns_used(urlconf) language = translation.get_language_from_request(request, check_path=i18n_patterns_used) language_from_path = translation.get_language_from_path(request.path_info) if not language_from_path and i18n_patterns_used and not prefixed_default_language: language = settings.LANGUAGE_CODE translation.activate(language) request.LANGUAGE_CODE = translation.get_language() def process_response(self, request, response): language = translation.get_language() language_from_path = translation.get_language_from_path(request.path_info) urlconf = getattr(request, 'urlconf', settings.ROOT_URLCONF) i18n_patterns_used, prefixed_default_language = is_language_prefix_patterns_used(urlconf) if (response.status_code == 404 and not language_from_path and i18n_patterns_used and prefixed_default_language): # Maybe the language code is missing in the URL? Try adding the # language prefix and redirecting to that URL. language_path = '/%s%s' % (language, request.path_info) path_valid = is_valid_path(language_path, urlconf) path_needs_slash = ( not path_valid and ( settings.APPEND_SLASH and not language_path.endswith('/') and is_valid_path('%s/' % language_path, urlconf) ) ) if path_valid or path_needs_slash: script_prefix = get_script_prefix() # Insert language after the script prefix and before the # rest of the URL language_url = request.get_full_path(force_append_slash=path_needs_slash).replace( script_prefix, '%s%s/' % (script_prefix, language), 1 ) return self.response_redirect_class(language_url) if not (i18n_patterns_used and language_from_path): patch_vary_headers(response, ('Accept-Language',)) response.setdefault('Content-Language', language) return response
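
# Illustrative note, not part of Django's source: for process_request() above
# to read the language from the session and cookies, LocaleMiddleware must be
# listed after SessionMiddleware and before CommonMiddleware, and it pairs
# with i18n_patterns() in the root URLconf when language-prefixed URLs are
# wanted. A typical fragment ('about_view' is a hypothetical view):
#
#   MIDDLEWARE = [
#       'django.contrib.sessions.middleware.SessionMiddleware',
#       'django.middleware.locale.LocaleMiddleware',
#       'django.middleware.common.CommonMiddleware',
#   ]
#
#   # urls.py
#   from django.conf.urls.i18n import i18n_patterns
#   from django.urls import path
#
#   urlpatterns = i18n_patterns(
#       path('about/', about_view),
#       # Serve the default language unprefixed; see
#       # is_language_prefix_patterns_used() above.
#       prefix_default_language=False,
#   )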
""" Cache middleware. If enabled, each Django-powered page will be cached based on URL. The canonical way to enable cache middleware is to set ``UpdateCacheMiddleware`` as your first piece of middleware, and ``FetchFromCacheMiddleware`` as the last:: MIDDLEWARE = [ 'django.middleware.cache.UpdateCacheMiddleware', ... 'django.middleware.cache.FetchFromCacheMiddleware' ] This is counter-intuitive, but correct: ``UpdateCacheMiddleware`` needs to run last during the response phase, which processes middleware bottom-up; ``FetchFromCacheMiddleware`` needs to run last during the request phase, which processes middleware top-down. The single-class ``CacheMiddleware`` can be used for some simple sites. However, if any other piece of middleware needs to affect the cache key, you'll need to use the two-part ``UpdateCacheMiddleware`` and ``FetchFromCacheMiddleware``. This'll most often happen when you're using Django's ``LocaleMiddleware``. More details about how the caching works: * Only GET or HEAD-requests with status code 200 are cached. * The number of seconds each page is stored for is set by the "max-age" section of the response's "Cache-Control" header, falling back to the CACHE_MIDDLEWARE_SECONDS setting if the section was not found. * This middleware expects that a HEAD request is answered with the same response headers exactly like the corresponding GET request. * When a hit occurs, a shallow copy of the original response object is returned from process_request. * Pages will be cached based on the contents of the request headers listed in the response's "Vary" header. * This middleware also sets ETag, Last-Modified, Expires and Cache-Control headers on the response object. """ from django.conf import settings from django.core.cache import DEFAULT_CACHE_ALIAS, caches from django.utils.cache import ( get_cache_key, get_max_age, has_vary_header, learn_cache_key, patch_response_headers, ) from django.utils.deprecation import MiddlewareMixin class UpdateCacheMiddleware(MiddlewareMixin): """ Response-phase cache middleware that updates the cache if the response is cacheable. Must be used as part of the two-part update/fetch cache middleware. UpdateCacheMiddleware must be the first piece of middleware in MIDDLEWARE so that it'll get called last during the response phase. """ # RemovedInDjango40Warning: when the deprecation ends, replace with: # def __init__(self, get_response): def __init__(self, get_response=None): self._get_response_none_deprecation(get_response) self.cache_timeout = settings.CACHE_MIDDLEWARE_SECONDS self.page_timeout = None self.key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX self.cache_alias = settings.CACHE_MIDDLEWARE_ALIAS self.cache = caches[self.cache_alias] self.get_response = get_response def _should_update_cache(self, request, response): return hasattr(request, '_cache_update_cache') and request._cache_update_cache def process_response(self, request, response): """Set the cache, if needed.""" if not self._should_update_cache(request, response): # We don't need to update the cache, just return. return response if response.streaming or response.status_code not in (200, 304): return response # Don't cache responses that set a user-specific (and maybe security # sensitive) cookie in response to a cookie-less request. 
if not request.COOKIES and response.cookies and has_vary_header(response, 'Cookie'): return response # Don't cache a response with 'Cache-Control: private' if 'private' in response.get('Cache-Control', ()): return response # Page timeout takes precedence over the "max-age" and the default # cache timeout. timeout = self.page_timeout if timeout is None: # The timeout from the "max-age" section of the "Cache-Control" # header takes precedence over the default cache timeout. timeout = get_max_age(response) if timeout is None: timeout = self.cache_timeout elif timeout == 0: # max-age was set to 0, don't cache. return response patch_response_headers(response, timeout) if timeout and response.status_code == 200: cache_key = learn_cache_key(request, response, timeout, self.key_prefix, cache=self.cache) if hasattr(response, 'render') and callable(response.render): response.add_post_render_callback( lambda r: self.cache.set(cache_key, r, timeout) ) else: self.cache.set(cache_key, response, timeout) return response class FetchFromCacheMiddleware(MiddlewareMixin): """ Request-phase cache middleware that fetches a page from the cache. Must be used as part of the two-part update/fetch cache middleware. FetchFromCacheMiddleware must be the last piece of middleware in MIDDLEWARE so that it'll get called last during the request phase. """ # RemovedInDjango40Warning: when the deprecation ends, replace with: # def __init__(self, get_response): def __init__(self, get_response=None): self._get_response_none_deprecation(get_response) self.key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX self.cache_alias = settings.CACHE_MIDDLEWARE_ALIAS self.cache = caches[self.cache_alias] self.get_response = get_response def process_request(self, request): """ Check whether the page is already cached and return the cached version if available. """ if request.method not in ('GET', 'HEAD'): request._cache_update_cache = False return None # Don't bother checking the cache. # try and get the cached GET response cache_key = get_cache_key(request, self.key_prefix, 'GET', cache=self.cache) if cache_key is None: request._cache_update_cache = True return None # No cache information available, need to rebuild. response = self.cache.get(cache_key) # if it wasn't found and we are looking for a HEAD, try looking just for that if response is None and request.method == 'HEAD': cache_key = get_cache_key(request, self.key_prefix, 'HEAD', cache=self.cache) response = self.cache.get(cache_key) if response is None: request._cache_update_cache = True return None # No cache information available, need to rebuild. # hit, return cached response request._cache_update_cache = False return response class CacheMiddleware(UpdateCacheMiddleware, FetchFromCacheMiddleware): """ Cache middleware that provides basic behavior for many simple sites. Also used as the hook point for the cache decorator, which is generated using the decorator-from-middleware utility. """ # RemovedInDjango40Warning: when the deprecation ends, replace with: # def __init__(self, get_response, cache_timeout=None, page_timeout=None, **kwargs): def __init__(self, get_response=None, cache_timeout=None, page_timeout=None, **kwargs): self._get_response_none_deprecation(get_response) self.get_response = get_response # We need to differentiate between "provided, but using default value", # and "not provided". If the value is provided using a default, then # we fall back to system defaults. If it is not provided at all, # we need to use middleware defaults. 
try: key_prefix = kwargs['key_prefix'] if key_prefix is None: key_prefix = '' except KeyError: key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX self.key_prefix = key_prefix try: cache_alias = kwargs['cache_alias'] if cache_alias is None: cache_alias = DEFAULT_CACHE_ALIAS except KeyError: cache_alias = settings.CACHE_MIDDLEWARE_ALIAS self.cache_alias = cache_alias if cache_timeout is None: cache_timeout = settings.CACHE_MIDDLEWARE_SECONDS self.cache_timeout = cache_timeout self.page_timeout = page_timeout self.cache = caches[self.cache_alias]
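
# Illustrative sketch, not part of Django's source: CacheMiddleware is also
# the hook point for the cache_page() decorator (built from this class with
# the decorator-from-middleware utility). Importing it here would be circular,
# so the per-view usage is shown commented out; it assumes a configured cache
# backend, and 'current_time' is a hypothetical view.
#
#   from django.http import HttpResponse
#   from django.views.decorators.cache import cache_page
#
#   @cache_page(60 * 15, key_prefix='site1')  # cache this page for 15 minutes
#   def current_time(request):
#       from datetime import datetime
#       return HttpResponse(datetime.now().isoformat())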
""" Cross Site Request Forgery Middleware. This module provides a middleware that implements protection against request forgeries from other sites. """ import logging import re import string from urllib.parse import urlparse from django.conf import settings from django.core.exceptions import DisallowedHost, ImproperlyConfigured from django.urls import get_callable from django.utils.cache import patch_vary_headers from django.utils.crypto import constant_time_compare, get_random_string from django.utils.deprecation import MiddlewareMixin from django.utils.http import is_same_domain from django.utils.log import log_response logger = logging.getLogger('django.security.csrf') REASON_NO_REFERER = "Referer checking failed - no Referer." REASON_BAD_REFERER = "Referer checking failed - %s does not match any trusted origins." REASON_NO_CSRF_COOKIE = "CSRF cookie not set." REASON_BAD_TOKEN = "CSRF token missing or incorrect." REASON_MALFORMED_REFERER = "Referer checking failed - Referer is malformed." REASON_INSECURE_REFERER = "Referer checking failed - Referer is insecure while host is secure." CSRF_SECRET_LENGTH = 32 CSRF_TOKEN_LENGTH = 2 * CSRF_SECRET_LENGTH CSRF_ALLOWED_CHARS = string.ascii_letters + string.digits CSRF_SESSION_KEY = '_csrftoken' def _get_failure_view(): """Return the view to be used for CSRF rejections.""" return get_callable(settings.CSRF_FAILURE_VIEW) def _get_new_csrf_string(): return get_random_string(CSRF_SECRET_LENGTH, allowed_chars=CSRF_ALLOWED_CHARS) def _mask_cipher_secret(secret): """ Given a secret (assumed to be a string of CSRF_ALLOWED_CHARS), generate a token by adding a mask and applying it to the secret. """ mask = _get_new_csrf_string() chars = CSRF_ALLOWED_CHARS pairs = zip((chars.index(x) for x in secret), (chars.index(x) for x in mask)) cipher = ''.join(chars[(x + y) % len(chars)] for x, y in pairs) return mask + cipher def _unmask_cipher_token(token): """ Given a token (assumed to be a string of CSRF_ALLOWED_CHARS, of length CSRF_TOKEN_LENGTH, and that its first half is a mask), use it to decrypt the second half to produce the original secret. """ mask = token[:CSRF_SECRET_LENGTH] token = token[CSRF_SECRET_LENGTH:] chars = CSRF_ALLOWED_CHARS pairs = zip((chars.index(x) for x in token), (chars.index(x) for x in mask)) return ''.join(chars[x - y] for x, y in pairs) # Note negative values are ok def _get_new_csrf_token(): return _mask_cipher_secret(_get_new_csrf_string()) def get_token(request): """ Return the CSRF token required for a POST form. The token is an alphanumeric value. A new token is created if one is not already set. A side effect of calling this function is to make the csrf_protect decorator and the CsrfViewMiddleware add a CSRF cookie and a 'Vary: Cookie' header to the outgoing response. For this reason, you may need to use this function lazily, as is done by the csrf context processor. """ if "CSRF_COOKIE" not in request.META: csrf_secret = _get_new_csrf_string() request.META["CSRF_COOKIE"] = _mask_cipher_secret(csrf_secret) else: csrf_secret = _unmask_cipher_token(request.META["CSRF_COOKIE"]) request.META["CSRF_COOKIE_USED"] = True return _mask_cipher_secret(csrf_secret) def rotate_token(request): """ Change the CSRF token in use for a request - should be done on login for security purposes. 
""" request.META.update({ "CSRF_COOKIE_USED": True, "CSRF_COOKIE": _get_new_csrf_token(), }) request.csrf_cookie_needs_reset = True def _sanitize_token(token): # Allow only ASCII alphanumerics if re.search('[^a-zA-Z0-9]', token): return _get_new_csrf_token() elif len(token) == CSRF_TOKEN_LENGTH: return token elif len(token) == CSRF_SECRET_LENGTH: # Older Django versions set cookies to values of CSRF_SECRET_LENGTH # alphanumeric characters. For backwards compatibility, accept # such values as unmasked secrets. # It's easier to mask here and be consistent later, rather than add # different code paths in the checks, although that might be a tad more # efficient. return _mask_cipher_secret(token) return _get_new_csrf_token() def _compare_masked_tokens(request_csrf_token, csrf_token): # Assume both arguments are sanitized -- that is, strings of # length CSRF_TOKEN_LENGTH, all CSRF_ALLOWED_CHARS. return constant_time_compare( _unmask_cipher_token(request_csrf_token), _unmask_cipher_token(csrf_token), ) class CsrfViewMiddleware(MiddlewareMixin): """ Require a present and correct csrfmiddlewaretoken for POST requests that have a CSRF cookie, and set an outgoing CSRF cookie. This middleware should be used in conjunction with the {% csrf_token %} template tag. """ # The _accept and _reject methods currently only exist for the sake of the # requires_csrf_token decorator. def _accept(self, request): # Avoid checking the request twice by adding a custom attribute to # request. This will be relevant when both decorator and middleware # are used. request.csrf_processing_done = True return None def _reject(self, request, reason): response = _get_failure_view()(request, reason=reason) log_response( 'Forbidden (%s): %s', reason, request.path, response=response, request=request, logger=logger, ) return response def _get_token(self, request): if settings.CSRF_USE_SESSIONS: try: return request.session.get(CSRF_SESSION_KEY) except AttributeError: raise ImproperlyConfigured( 'CSRF_USE_SESSIONS is enabled, but request.session is not ' 'set. SessionMiddleware must appear before CsrfViewMiddleware ' 'in MIDDLEWARE.' ) else: try: cookie_token = request.COOKIES[settings.CSRF_COOKIE_NAME] except KeyError: return None csrf_token = _sanitize_token(cookie_token) if csrf_token != cookie_token: # Cookie token needed to be replaced; # the cookie needs to be reset. request.csrf_cookie_needs_reset = True return csrf_token def _set_token(self, request, response): if settings.CSRF_USE_SESSIONS: if request.session.get(CSRF_SESSION_KEY) != request.META['CSRF_COOKIE']: request.session[CSRF_SESSION_KEY] = request.META['CSRF_COOKIE'] else: response.set_cookie( settings.CSRF_COOKIE_NAME, request.META['CSRF_COOKIE'], max_age=settings.CSRF_COOKIE_AGE, domain=settings.CSRF_COOKIE_DOMAIN, path=settings.CSRF_COOKIE_PATH, secure=settings.CSRF_COOKIE_SECURE, httponly=settings.CSRF_COOKIE_HTTPONLY, samesite=settings.CSRF_COOKIE_SAMESITE, ) # Set the Vary header since content varies with the CSRF cookie. patch_vary_headers(response, ('Cookie',)) def process_request(self, request): csrf_token = self._get_token(request) if csrf_token is not None: # Use same token next time. 
request.META['CSRF_COOKIE'] = csrf_token def process_view(self, request, callback, callback_args, callback_kwargs): if getattr(request, 'csrf_processing_done', False): return None # Wait until request.META["CSRF_COOKIE"] has been manipulated before # bailing out, so that get_token still works if getattr(callback, 'csrf_exempt', False): return None # Assume that anything not defined as 'safe' by RFC7231 needs protection if request.method not in ('GET', 'HEAD', 'OPTIONS', 'TRACE'): if getattr(request, '_dont_enforce_csrf_checks', False): # Mechanism to turn off CSRF checks for test suite. # It comes after the creation of CSRF cookies, so that # everything else continues to work exactly the same # (e.g. cookies are sent, etc.), but before any # branches that call reject(). return self._accept(request) if request.is_secure(): # Suppose user visits http://example.com/ # An active network attacker (man-in-the-middle, MITM) sends a # POST form that targets https://example.com/detonate-bomb/ and # submits it via JavaScript. # # The attacker will need to provide a CSRF cookie and token, but # that's no problem for a MITM and the session-independent # secret we're using. So the MITM can circumvent the CSRF # protection. This is true for any HTTP connection, but anyone # using HTTPS expects better! For this reason, for # https://example.com/ we need additional protection that treats # http://example.com/ as completely untrusted. Under HTTPS, # Barth et al. found that the Referer header is missing for # same-domain requests in only about 0.2% of cases or less, so # we can use strict Referer checking. referer = request.META.get('HTTP_REFERER') if referer is None: return self._reject(request, REASON_NO_REFERER) referer = urlparse(referer) # Make sure we have a valid URL for Referer. if '' in (referer.scheme, referer.netloc): return self._reject(request, REASON_MALFORMED_REFERER) # Ensure that our Referer is also secure. if referer.scheme != 'https': return self._reject(request, REASON_INSECURE_REFERER) # If there isn't a CSRF_COOKIE_DOMAIN, require an exact match # match on host:port. If not, obey the cookie rules (or those # for the session cookie, if CSRF_USE_SESSIONS). good_referer = ( settings.SESSION_COOKIE_DOMAIN if settings.CSRF_USE_SESSIONS else settings.CSRF_COOKIE_DOMAIN ) if good_referer is not None: server_port = request.get_port() if server_port not in ('443', '80'): good_referer = '%s:%s' % (good_referer, server_port) else: try: # request.get_host() includes the port. good_referer = request.get_host() except DisallowedHost: pass # Create a list of all acceptable HTTP referers, including the # current host if it's permitted by ALLOWED_HOSTS. good_hosts = list(settings.CSRF_TRUSTED_ORIGINS) if good_referer is not None: good_hosts.append(good_referer) if not any(is_same_domain(referer.netloc, host) for host in good_hosts): reason = REASON_BAD_REFERER % referer.geturl() return self._reject(request, reason) # Access csrf_token via self._get_token() as rotate_token() may # have been called by an authentication middleware during the # process_request() phase. csrf_token = self._get_token(request) if csrf_token is None: # No CSRF cookie. For POST requests, we insist on a CSRF cookie, # and in this way we can avoid all CSRF attacks, including login # CSRF. return self._reject(request, REASON_NO_CSRF_COOKIE) # Check non-cookie token for match. 
request_csrf_token = "" if request.method == "POST": try: request_csrf_token = request.POST.get('csrfmiddlewaretoken', '') except OSError: # Handle a broken connection before we've completed reading # the POST data. process_view shouldn't raise any # exceptions, so we'll ignore and serve the user a 403 # (assuming they're still listening, which they probably # aren't because of the error). pass if request_csrf_token == "": # Fall back to X-CSRFToken, to make things easier for AJAX, # and possible for PUT/DELETE. request_csrf_token = request.META.get(settings.CSRF_HEADER_NAME, '') request_csrf_token = _sanitize_token(request_csrf_token) if not _compare_masked_tokens(request_csrf_token, csrf_token): return self._reject(request, REASON_BAD_TOKEN) return self._accept(request) def process_response(self, request, response): if not getattr(request, 'csrf_cookie_needs_reset', False): if getattr(response, 'csrf_cookie_set', False): return response if not request.META.get("CSRF_COOKIE_USED", False): return response # Set the CSRF cookie even if it's already set, so we renew # the expiry timer. self._set_token(request, response) response.csrf_cookie_set = True return response
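
# A minimal illustrative sketch, not part of Django's source: the X-CSRFToken
# fallback in process_view() above makes the protection usable from AJAX
# clients (CSRF_HEADER_NAME defaults to 'HTTP_X_CSRFTOKEN', i.e. an
# 'X-CSRFToken' request header). The view name below is hypothetical.
def _demo_csrf_token_view(request):
    """Expose a fresh token to a JavaScript client."""
    from django.http import JsonResponse
    # Calling get_token() also sets CSRF_COOKIE_USED, so process_response()
    # above will set or refresh the CSRF cookie on this response.
    return JsonResponse({'csrfToken': get_token(request)})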
""" This module converts requested URLs to callback view functions. URLResolver is the main class here. Its resolve() method takes a URL (as a string) and returns a ResolverMatch object which provides access to all attributes of the resolved URL match. """ import functools import inspect import re import string from importlib import import_module from urllib.parse import quote from asgiref.local import Local from django.conf import settings from django.core.checks import Error, Warning from django.core.checks.urls import check_resolver from django.core.exceptions import ImproperlyConfigured, ViewDoesNotExist from django.utils.datastructures import MultiValueDict from django.utils.functional import cached_property from django.utils.http import RFC3986_SUBDELIMS, escape_leading_slashes from django.utils.regex_helper import _lazy_re_compile, normalize from django.utils.translation import get_language from .converters import get_converter from .exceptions import NoReverseMatch, Resolver404 from .utils import get_callable class ResolverMatch: def __init__(self, func, args, kwargs, url_name=None, app_names=None, namespaces=None, route=None): self.func = func self.args = args self.kwargs = kwargs self.url_name = url_name self.route = route # If a URLRegexResolver doesn't have a namespace or app_name, it passes # in an empty value. self.app_names = [x for x in app_names if x] if app_names else [] self.app_name = ':'.join(self.app_names) self.namespaces = [x for x in namespaces if x] if namespaces else [] self.namespace = ':'.join(self.namespaces) if not hasattr(func, '__name__'): # A class-based view self._func_path = func.__class__.__module__ + '.' + func.__class__.__name__ else: # A function-based view self._func_path = func.__module__ + '.' + func.__name__ view_path = url_name or self._func_path self.view_name = ':'.join(self.namespaces + [view_path]) def __getitem__(self, index): return (self.func, self.args, self.kwargs)[index] def __repr__(self): return "ResolverMatch(func=%s, args=%s, kwargs=%s, url_name=%s, app_names=%s, namespaces=%s, route=%s)" % ( self._func_path, self.args, self.kwargs, self.url_name, self.app_names, self.namespaces, self.route, ) def get_resolver(urlconf=None): if urlconf is None: urlconf = settings.ROOT_URLCONF return _get_cached_resolver(urlconf) @functools.lru_cache(maxsize=None) def _get_cached_resolver(urlconf=None): return URLResolver(RegexPattern(r'^/'), urlconf) @functools.lru_cache(maxsize=None) def get_ns_resolver(ns_pattern, resolver, converters): # Build a namespaced resolver for the given parent URLconf pattern. # This makes it possible to have captured parameters in the parent # URLconf pattern. pattern = RegexPattern(ns_pattern) pattern.converters = dict(converters) ns_resolver = URLResolver(pattern, resolver.url_patterns) return URLResolver(RegexPattern(r'^/'), [ns_resolver]) class LocaleRegexDescriptor: def __init__(self, attr): self.attr = attr def __get__(self, instance, cls=None): """ Return a compiled regular expression based on the active language. """ if instance is None: return self # As a performance optimization, if the given regex string is a regular # string (not a lazily-translated string proxy), compile it once and # avoid per-language compilation. 
pattern = getattr(instance, self.attr) if isinstance(pattern, str): instance.__dict__['regex'] = instance._compile(pattern) return instance.__dict__['regex'] language_code = get_language() if language_code not in instance._regex_dict: instance._regex_dict[language_code] = instance._compile(str(pattern)) return instance._regex_dict[language_code] class CheckURLMixin: def describe(self): """ Format the URL pattern for display in warning messages. """ description = "'{}'".format(self) if self.name: description += " [name='{}']".format(self.name) return description def _check_pattern_startswith_slash(self): """ Check that the pattern does not begin with a forward slash. """ regex_pattern = self.regex.pattern if not settings.APPEND_SLASH: # Skip check as it can be useful to start a URL pattern with a slash # when APPEND_SLASH=False. return [] if regex_pattern.startswith(('/', '^/', '^\\/')) and not regex_pattern.endswith('/'): warning = Warning( "Your URL pattern {} has a route beginning with a '/'. Remove this " "slash as it is unnecessary. If this pattern is targeted in an " "include(), ensure the include() pattern has a trailing '/'.".format( self.describe() ), id="urls.W002", ) return [warning] else: return [] class RegexPattern(CheckURLMixin): regex = LocaleRegexDescriptor('_regex') def __init__(self, regex, name=None, is_endpoint=False): self._regex = regex self._regex_dict = {} self._is_endpoint = is_endpoint self.name = name self.converters = {} def match(self, path): match = self.regex.search(path) if match: # If there are any named groups, use those as kwargs, ignoring # non-named groups. Otherwise, pass all non-named arguments as # positional arguments. kwargs = match.groupdict() args = () if kwargs else match.groups() kwargs = {k: v for k, v in kwargs.items() if v is not None} return path[match.end():], args, kwargs return None def check(self): warnings = [] warnings.extend(self._check_pattern_startswith_slash()) if not self._is_endpoint: warnings.extend(self._check_include_trailing_dollar()) return warnings def _check_include_trailing_dollar(self): regex_pattern = self.regex.pattern if regex_pattern.endswith('$') and not regex_pattern.endswith(r'\$'): return [Warning( "Your URL pattern {} uses include with a route ending with a '$'. " "Remove the dollar from the route to avoid problems including " "URLs.".format(self.describe()), id='urls.W001', )] else: return [] def _compile(self, regex): """Compile and return the given regular expression.""" try: return re.compile(regex) except re.error as e: raise ImproperlyConfigured( '"%s" is not a valid regular expression: %s' % (regex, e) ) from e def __str__(self): return str(self._regex) _PATH_PARAMETER_COMPONENT_RE = _lazy_re_compile( r'<(?:(?P<converter>[^>:]+):)?(?P<parameter>[^>]+)>' ) def _route_to_regex(route, is_endpoint=False): """ Convert a path pattern into a regular expression. Return the regular expression and a dictionary mapping the capture names to the converters. For example, 'foo/<int:pk>' returns '^foo\\/(?P<pk>[0-9]+)' and {'pk': <django.urls.converters.IntConverter>}. """ if not set(route).isdisjoint(string.whitespace): raise ImproperlyConfigured("URL route '%s' cannot contain whitespace." 
                                   % route)
    original_route = route
    parts = ['^']
    converters = {}
    while True:
        match = _PATH_PARAMETER_COMPONENT_RE.search(route)
        if not match:
            parts.append(re.escape(route))
            break
        parts.append(re.escape(route[:match.start()]))
        route = route[match.end():]
        parameter = match.group('parameter')
        if not parameter.isidentifier():
            raise ImproperlyConfigured(
                "URL route '%s' uses parameter name %r which isn't a valid "
                "Python identifier." % (original_route, parameter)
            )
        raw_converter = match.group('converter')
        if raw_converter is None:
            # If a converter isn't specified, the default is `str`.
            raw_converter = 'str'
        try:
            converter = get_converter(raw_converter)
        except KeyError as e:
            raise ImproperlyConfigured(
                'URL route %r uses invalid converter %r.'
                % (original_route, raw_converter)
            ) from e
        converters[parameter] = converter
        parts.append('(?P<' + parameter + '>' + converter.regex + ')')
    if is_endpoint:
        parts.append('$')
    return ''.join(parts), converters


class RoutePattern(CheckURLMixin):
    regex = LocaleRegexDescriptor('_route')

    def __init__(self, route, name=None, is_endpoint=False):
        self._route = route
        self._regex_dict = {}
        self._is_endpoint = is_endpoint
        self.name = name
        self.converters = _route_to_regex(str(route), is_endpoint)[1]

    def match(self, path):
        match = self.regex.search(path)
        if match:
            # RoutePattern doesn't allow non-named groups so args are ignored.
            kwargs = match.groupdict()
            for key, value in kwargs.items():
                converter = self.converters[key]
                try:
                    kwargs[key] = converter.to_python(value)
                except ValueError:
                    return None
            return path[match.end():], (), kwargs
        return None

    def check(self):
        warnings = self._check_pattern_startswith_slash()
        route = self._route
        if '(?P<' in route or route.startswith('^') or route.endswith('$'):
            warnings.append(Warning(
                "Your URL pattern {} has a route that contains '(?P<', begins "
                "with a '^', or ends with a '$'. This was likely an oversight "
                "when migrating to django.urls.path().".format(self.describe()),
                id='2_0.W001',
            ))
        return warnings

    def _compile(self, route):
        return re.compile(_route_to_regex(route, self._is_endpoint)[0])

    def __str__(self):
        return str(self._route)


class LocalePrefixPattern:
    def __init__(self, prefix_default_language=True):
        self.prefix_default_language = prefix_default_language
        self.converters = {}

    @property
    def regex(self):
        # This is only used by reverse() and cached in _reverse_dict.
        return re.compile(self.language_prefix)

    @property
    def language_prefix(self):
        language_code = get_language() or settings.LANGUAGE_CODE
        if language_code == settings.LANGUAGE_CODE and not self.prefix_default_language:
            return ''
        else:
            return '%s/' % language_code

    def match(self, path):
        language_prefix = self.language_prefix
        if path.startswith(language_prefix):
            return path[len(language_prefix):], (), {}
        return None

    def check(self):
        return []

    def describe(self):
        return "'{}'".format(self)

    def __str__(self):
        return self.language_prefix


class URLPattern:
    def __init__(self, pattern, callback, default_args=None, name=None):
        self.pattern = pattern
        self.callback = callback  # the view
        self.default_args = default_args or {}
        self.name = name

    def __repr__(self):
        return '<%s %s>' % (self.__class__.__name__, self.pattern.describe())

    def check(self):
        warnings = self._check_pattern_name()
        warnings.extend(self.pattern.check())
        return warnings

    def _check_pattern_name(self):
        """
        Check that the pattern name does not contain a colon.
        """
        if self.pattern.name is not None and ":" in self.pattern.name:
            warning = Warning(
                "Your URL pattern {} has a name including a ':'. Remove the "
                "colon, to avoid ambiguous namespace references.".format(self.pattern.describe()),
                id="urls.W003",
            )
            return [warning]
        else:
            return []

    def resolve(self, path):
        match = self.pattern.match(path)
        if match:
            new_path, args, kwargs = match
            # Pass any extra_kwargs as **kwargs.
            kwargs.update(self.default_args)
            return ResolverMatch(self.callback, args, kwargs, self.pattern.name, route=str(self.pattern))

    @cached_property
    def lookup_str(self):
        """
        A string that identifies the view (e.g. 'path.to.view_function' or
        'path.to.ClassBasedView').
        """
        callback = self.callback
        if isinstance(callback, functools.partial):
            callback = callback.func
        if not hasattr(callback, '__name__'):
            return callback.__module__ + "." + callback.__class__.__name__
        return callback.__module__ + "." + callback.__qualname__


class URLResolver:
    def __init__(self, pattern, urlconf_name, default_kwargs=None, app_name=None, namespace=None):
        self.pattern = pattern
        # urlconf_name is the dotted Python path to the module defining
        # urlpatterns. It may also be an object with an urlpatterns attribute
        # or urlpatterns itself.
        self.urlconf_name = urlconf_name
        self.callback = None
        self.default_kwargs = default_kwargs or {}
        self.namespace = namespace
        self.app_name = app_name
        self._reverse_dict = {}
        self._namespace_dict = {}
        self._app_dict = {}
        # set of dotted paths to all functions and classes that are used in
        # urlpatterns
        self._callback_strs = set()
        self._populated = False
        self._local = Local()

    def __repr__(self):
        if isinstance(self.urlconf_name, list) and self.urlconf_name:
            # Don't bother to output the whole list, it can be huge
            urlconf_repr = '<%s list>' % self.urlconf_name[0].__class__.__name__
        else:
            urlconf_repr = repr(self.urlconf_name)
        return '<%s %s (%s:%s) %s>' % (
            self.__class__.__name__, urlconf_repr, self.app_name,
            self.namespace, self.pattern.describe(),
        )

    def check(self):
        messages = []
        for pattern in self.url_patterns:
            messages.extend(check_resolver(pattern))
        messages.extend(self._check_custom_error_handlers())
        return messages or self.pattern.check()

    def _check_custom_error_handlers(self):
        messages = []
        # All handlers take (request, exception) arguments except handler500
        # which takes (request).
        for status_code, num_parameters in [(400, 2), (403, 2), (404, 2), (500, 1)]:
            try:
                handler, param_dict = self.resolve_error_handler(status_code)
            except (ImportError, ViewDoesNotExist) as e:
                path = getattr(self.urlconf_module, 'handler%s' % status_code)
                msg = (
                    "The custom handler{status_code} view '{path}' could not be imported."
                ).format(status_code=status_code, path=path)
                messages.append(Error(msg, hint=str(e), id='urls.E008'))
                continue
            signature = inspect.signature(handler)
            args = [None] * num_parameters
            try:
                signature.bind(*args)
            except TypeError:
                msg = (
                    "The custom handler{status_code} view '{path}' does not "
                    "take the correct number of arguments ({args})."
                ).format(
                    status_code=status_code,
                    path=handler.__module__ + '.' + handler.__qualname__,
                    args='request, exception' if num_parameters == 2 else 'request',
                )
                messages.append(Error(msg, id='urls.E007'))
        return messages

    def _populate(self):
        # Short-circuit if called recursively in this thread to prevent
        # infinite recursion. Concurrent threads may call this at the same
        # time and will need to continue, so set 'populating' on a
        # thread-local variable.
if getattr(self._local, 'populating', False): return try: self._local.populating = True lookups = MultiValueDict() namespaces = {} apps = {} language_code = get_language() for url_pattern in reversed(self.url_patterns): p_pattern = url_pattern.pattern.regex.pattern if p_pattern.startswith('^'): p_pattern = p_pattern[1:] if isinstance(url_pattern, URLPattern): self._callback_strs.add(url_pattern.lookup_str) bits = normalize(url_pattern.pattern.regex.pattern) lookups.appendlist( url_pattern.callback, (bits, p_pattern, url_pattern.default_args, url_pattern.pattern.converters) ) if url_pattern.name is not None: lookups.appendlist( url_pattern.name, (bits, p_pattern, url_pattern.default_args, url_pattern.pattern.converters) ) else: # url_pattern is a URLResolver. url_pattern._populate() if url_pattern.app_name: apps.setdefault(url_pattern.app_name, []).append(url_pattern.namespace) namespaces[url_pattern.namespace] = (p_pattern, url_pattern) else: for name in url_pattern.reverse_dict: for matches, pat, defaults, converters in url_pattern.reverse_dict.getlist(name): new_matches = normalize(p_pattern + pat) lookups.appendlist( name, ( new_matches, p_pattern + pat, {**defaults, **url_pattern.default_kwargs}, {**self.pattern.converters, **url_pattern.pattern.converters, **converters} ) ) for namespace, (prefix, sub_pattern) in url_pattern.namespace_dict.items(): current_converters = url_pattern.pattern.converters sub_pattern.pattern.converters.update(current_converters) namespaces[namespace] = (p_pattern + prefix, sub_pattern) for app_name, namespace_list in url_pattern.app_dict.items(): apps.setdefault(app_name, []).extend(namespace_list) self._callback_strs.update(url_pattern._callback_strs) self._namespace_dict[language_code] = namespaces self._app_dict[language_code] = apps self._reverse_dict[language_code] = lookups self._populated = True finally: self._local.populating = False @property def reverse_dict(self): language_code = get_language() if language_code not in self._reverse_dict: self._populate() return self._reverse_dict[language_code] @property def namespace_dict(self): language_code = get_language() if language_code not in self._namespace_dict: self._populate() return self._namespace_dict[language_code] @property def app_dict(self): language_code = get_language() if language_code not in self._app_dict: self._populate() return self._app_dict[language_code] @staticmethod def _join_route(route1, route2): """Join two routes, without the starting ^ in the second route.""" if not route1: return route2 if route2.startswith('^'): route2 = route2[1:] return route1 + route2 def _is_callback(self, name): if not self._populated: self._populate() return name in self._callback_strs def resolve(self, path): path = str(path) # path may be a reverse_lazy object tried = [] match = self.pattern.match(path) if match: new_path, args, kwargs = match for pattern in self.url_patterns: try: sub_match = pattern.resolve(new_path) except Resolver404 as e: sub_tried = e.args[0].get('tried') if sub_tried is not None: tried.extend([pattern] + t for t in sub_tried) else: tried.append([pattern]) else: if sub_match: # Merge captured arguments in match with submatch sub_match_dict = {**kwargs, **self.default_kwargs} # Update the sub_match_dict with the kwargs from the sub_match. sub_match_dict.update(sub_match.kwargs) # If there are *any* named groups, ignore all non-named groups. # Otherwise, pass all non-named arguments as positional arguments. 
sub_match_args = sub_match.args if not sub_match_dict: sub_match_args = args + sub_match.args current_route = '' if isinstance(pattern, URLPattern) else str(pattern.pattern) return ResolverMatch( sub_match.func, sub_match_args, sub_match_dict, sub_match.url_name, [self.app_name] + sub_match.app_names, [self.namespace] + sub_match.namespaces, self._join_route(current_route, sub_match.route), ) tried.append([pattern]) raise Resolver404({'tried': tried, 'path': new_path}) raise Resolver404({'path': path}) @cached_property def urlconf_module(self): if isinstance(self.urlconf_name, str): return import_module(self.urlconf_name) else: return self.urlconf_name @cached_property def url_patterns(self): # urlconf_module might be a valid set of patterns, so we default to it patterns = getattr(self.urlconf_module, "urlpatterns", self.urlconf_module) try: iter(patterns) except TypeError as e: msg = ( "The included URLconf '{name}' does not appear to have any " "patterns in it. If you see valid patterns in the file then " "the issue is probably caused by a circular import." ) raise ImproperlyConfigured(msg.format(name=self.urlconf_name)) from e return patterns def resolve_error_handler(self, view_type): callback = getattr(self.urlconf_module, 'handler%s' % view_type, None) if not callback: # No handler specified in file; use lazy import, since # django.conf.urls imports this file. from django.conf import urls callback = getattr(urls, 'handler%s' % view_type) return get_callable(callback), {} def reverse(self, lookup_view, *args, **kwargs): return self._reverse_with_prefix(lookup_view, '', *args, **kwargs) def _reverse_with_prefix(self, lookup_view, _prefix, *args, **kwargs): if args and kwargs: raise ValueError("Don't mix *args and **kwargs in call to reverse()!") if not self._populated: self._populate() possibilities = self.reverse_dict.getlist(lookup_view) for possibility, pattern, defaults, converters in possibilities: for result, params in possibility: if args: if len(args) != len(params): continue candidate_subs = dict(zip(params, args)) else: if set(kwargs).symmetric_difference(params).difference(defaults): continue if any(kwargs.get(k, v) != v for k, v in defaults.items()): continue candidate_subs = kwargs # Convert the candidate subs to text using Converter.to_url(). text_candidate_subs = {} match = True for k, v in candidate_subs.items(): if k in converters: try: text_candidate_subs[k] = converters[k].to_url(v) except ValueError: match = False break else: text_candidate_subs[k] = str(v) if not match: continue # WSGI provides decoded URLs, without %xx escapes, and the URL # resolver operates on such URLs. First substitute arguments # without quoting to build a decoded URL and look for a match. # Then, if we have a match, redo the substitution with quoted # arguments in order to return a properly encoded URL. candidate_pat = _prefix.replace('%', '%%') + result if re.search('^%s%s' % (re.escape(_prefix), pattern), candidate_pat % text_candidate_subs): # safe characters from `pchar` definition of RFC 3986 url = quote(candidate_pat % text_candidate_subs, safe=RFC3986_SUBDELIMS + '/~:@') # Don't allow construction of scheme relative urls. return escape_leading_slashes(url) # lookup_view can be URL name or callable, but callables are not # friendly in error messages. 
m = getattr(lookup_view, '__module__', None) n = getattr(lookup_view, '__name__', None) if m is not None and n is not None: lookup_view_s = "%s.%s" % (m, n) else: lookup_view_s = lookup_view patterns = [pattern for (_, pattern, _, _) in possibilities] if patterns: if args: arg_msg = "arguments '%s'" % (args,) elif kwargs: arg_msg = "keyword arguments '%s'" % kwargs else: arg_msg = "no arguments" msg = ( "Reverse for '%s' with %s not found. %d pattern(s) tried: %s" % (lookup_view_s, arg_msg, len(patterns), patterns) ) else: msg = ( "Reverse for '%(view)s' not found. '%(view)s' is not " "a valid view function or pattern name." % {'view': lookup_view_s} ) raise NoReverseMatch(msg)
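
# Illustrative sketch, not part of Django's source: the resolve()/reverse()
# machinery above is normally reached through the public helpers in
# django.urls. Assuming a URLconf containing
#
#   path('articles/<int:year>/', views.year_archive, name='archive')
#
# the round trip looks like ('views.year_archive' is hypothetical):
#
#   from django.urls import resolve, reverse
#
#   match = resolve('/articles/2003/')           # a ResolverMatch
#   match.url_name                               # 'archive'
#   match.kwargs                                 # {'year': 2003}, via IntConverter.to_python()
#   reverse('archive', kwargs={'year': 2003})    # '/articles/2003/'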
""" Helper functions for creating Form classes from Django models and database field objects. """ import warnings from itertools import chain from django.core.exceptions import ( NON_FIELD_ERRORS, FieldError, ImproperlyConfigured, ValidationError, ) from django.forms.fields import ChoiceField, Field from django.forms.forms import BaseForm, DeclarativeFieldsMetaclass from django.forms.formsets import BaseFormSet, formset_factory from django.forms.utils import ErrorList from django.forms.widgets import ( HiddenInput, MultipleHiddenInput, RadioSelect, SelectMultiple, ) from django.utils.deprecation import RemovedInDjango40Warning from django.utils.text import capfirst, get_text_list from django.utils.translation import gettext, gettext_lazy as _ __all__ = ( 'ModelForm', 'BaseModelForm', 'model_to_dict', 'fields_for_model', 'ModelChoiceField', 'ModelMultipleChoiceField', 'ALL_FIELDS', 'BaseModelFormSet', 'modelformset_factory', 'BaseInlineFormSet', 'inlineformset_factory', 'modelform_factory', ) ALL_FIELDS = '__all__' def construct_instance(form, instance, fields=None, exclude=None): """ Construct and return a model instance from the bound ``form``'s ``cleaned_data``, but do not save the returned instance to the database. """ from django.db import models opts = instance._meta cleaned_data = form.cleaned_data file_field_list = [] for f in opts.fields: if not f.editable or isinstance(f, models.AutoField) \ or f.name not in cleaned_data: continue if fields is not None and f.name not in fields: continue if exclude and f.name in exclude: continue # Leave defaults for fields that aren't in POST data, except for # checkbox inputs because they don't appear in POST data if not checked. if ( f.has_default() and form[f.name].field.widget.value_omitted_from_data(form.data, form.files, form.add_prefix(f.name)) and cleaned_data.get(f.name) in form[f.name].field.empty_values ): continue # Defer saving file-type fields until after the other fields, so a # callable upload_to can use the values from other fields. if isinstance(f, models.FileField): file_field_list.append(f) else: f.save_form_data(instance, cleaned_data[f.name]) for f in file_field_list: f.save_form_data(instance, cleaned_data[f.name]) return instance # ModelForms ################################################################# def model_to_dict(instance, fields=None, exclude=None): """ Return a dict containing the data in ``instance`` suitable for passing as a Form's ``initial`` keyword argument. ``fields`` is an optional list of field names. If provided, return only the named. ``exclude`` is an optional list of field names. If provided, exclude the named from the returned dict, even if they are listed in the ``fields`` argument. 
""" opts = instance._meta data = {} for f in chain(opts.concrete_fields, opts.private_fields, opts.many_to_many): if not getattr(f, 'editable', False): continue if fields is not None and f.name not in fields: continue if exclude and f.name in exclude: continue data[f.name] = f.value_from_object(instance) return data def apply_limit_choices_to_to_formfield(formfield): """Apply limit_choices_to to the formfield's queryset if needed.""" if hasattr(formfield, 'queryset') and hasattr(formfield, 'get_limit_choices_to'): limit_choices_to = formfield.get_limit_choices_to() if limit_choices_to is not None: formfield.queryset = formfield.queryset.complex_filter(limit_choices_to) def fields_for_model(model, fields=None, exclude=None, widgets=None, formfield_callback=None, localized_fields=None, labels=None, help_texts=None, error_messages=None, field_classes=None, *, apply_limit_choices_to=True): """ Return a dictionary containing form fields for the given model. ``fields`` is an optional list of field names. If provided, return only the named fields. ``exclude`` is an optional list of field names. If provided, exclude the named fields from the returned fields, even if they are listed in the ``fields`` argument. ``widgets`` is a dictionary of model field names mapped to a widget. ``formfield_callback`` is a callable that takes a model field and returns a form field. ``localized_fields`` is a list of names of fields which should be localized. ``labels`` is a dictionary of model field names mapped to a label. ``help_texts`` is a dictionary of model field names mapped to a help text. ``error_messages`` is a dictionary of model field names mapped to a dictionary of error messages. ``field_classes`` is a dictionary of model field names mapped to a form field class. ``apply_limit_choices_to`` is a boolean indicating if limit_choices_to should be applied to a field's queryset. 
""" field_dict = {} ignored = [] opts = model._meta # Avoid circular import from django.db.models import Field as ModelField sortable_private_fields = [f for f in opts.private_fields if isinstance(f, ModelField)] for f in sorted(chain(opts.concrete_fields, sortable_private_fields, opts.many_to_many)): if not getattr(f, 'editable', False): if (fields is not None and f.name in fields and (exclude is None or f.name not in exclude)): raise FieldError( "'%s' cannot be specified for %s model form as it is a non-editable field" % ( f.name, model.__name__) ) continue if fields is not None and f.name not in fields: continue if exclude and f.name in exclude: continue kwargs = {} if widgets and f.name in widgets: kwargs['widget'] = widgets[f.name] if localized_fields == ALL_FIELDS or (localized_fields and f.name in localized_fields): kwargs['localize'] = True if labels and f.name in labels: kwargs['label'] = labels[f.name] if help_texts and f.name in help_texts: kwargs['help_text'] = help_texts[f.name] if error_messages and f.name in error_messages: kwargs['error_messages'] = error_messages[f.name] if field_classes and f.name in field_classes: kwargs['form_class'] = field_classes[f.name] if formfield_callback is None: formfield = f.formfield(**kwargs) elif not callable(formfield_callback): raise TypeError('formfield_callback must be a function or callable') else: formfield = formfield_callback(f, **kwargs) if formfield: if apply_limit_choices_to: apply_limit_choices_to_to_formfield(formfield) field_dict[f.name] = formfield else: ignored.append(f.name) if fields: field_dict = { f: field_dict.get(f) for f in fields if (not exclude or f not in exclude) and f not in ignored } return field_dict class ModelFormOptions: def __init__(self, options=None): self.model = getattr(options, 'model', None) self.fields = getattr(options, 'fields', None) self.exclude = getattr(options, 'exclude', None) self.widgets = getattr(options, 'widgets', None) self.localized_fields = getattr(options, 'localized_fields', None) self.labels = getattr(options, 'labels', None) self.help_texts = getattr(options, 'help_texts', None) self.error_messages = getattr(options, 'error_messages', None) self.field_classes = getattr(options, 'field_classes', None) class ModelFormMetaclass(DeclarativeFieldsMetaclass): def __new__(mcs, name, bases, attrs): base_formfield_callback = None for b in bases: if hasattr(b, 'Meta') and hasattr(b.Meta, 'formfield_callback'): base_formfield_callback = b.Meta.formfield_callback break formfield_callback = attrs.pop('formfield_callback', base_formfield_callback) new_class = super().__new__(mcs, name, bases, attrs) if bases == (BaseModelForm,): return new_class opts = new_class._meta = ModelFormOptions(getattr(new_class, 'Meta', None)) # We check if a string was passed to `fields` or `exclude`, # which is likely to be a mistake where the user typed ('foo') instead # of ('foo',) for opt in ['fields', 'exclude', 'localized_fields']: value = getattr(opts, opt) if isinstance(value, str) and value != ALL_FIELDS: msg = ("%(model)s.Meta.%(opt)s cannot be a string. " "Did you mean to type: ('%(value)s',)?" % { 'model': new_class.__name__, 'opt': opt, 'value': value, }) raise TypeError(msg) if opts.model: # If a model is defined, extract form fields from it. if opts.fields is None and opts.exclude is None: raise ImproperlyConfigured( "Creating a ModelForm without either the 'fields' attribute " "or the 'exclude' attribute is prohibited; form %s " "needs updating." 
% name ) if opts.fields == ALL_FIELDS: # Sentinel for fields_for_model to indicate "get the list of # fields from the model" opts.fields = None fields = fields_for_model( opts.model, opts.fields, opts.exclude, opts.widgets, formfield_callback, opts.localized_fields, opts.labels, opts.help_texts, opts.error_messages, opts.field_classes, # limit_choices_to will be applied during ModelForm.__init__(). apply_limit_choices_to=False, ) # make sure opts.fields doesn't specify an invalid field none_model_fields = {k for k, v in fields.items() if not v} missing_fields = none_model_fields.difference(new_class.declared_fields) if missing_fields: message = 'Unknown field(s) (%s) specified for %s' message = message % (', '.join(missing_fields), opts.model.__name__) raise FieldError(message) # Override default model fields with any custom declared ones # (plus, include all the other declared fields). fields.update(new_class.declared_fields) else: fields = new_class.declared_fields new_class.base_fields = fields return new_class class BaseModelForm(BaseForm): def __init__(self, data=None, files=None, auto_id='id_%s', prefix=None, initial=None, error_class=ErrorList, label_suffix=None, empty_permitted=False, instance=None, use_required_attribute=None, renderer=None): opts = self._meta if opts.model is None: raise ValueError('ModelForm has no model class specified.') if instance is None: # if we didn't get an instance, instantiate a new one self.instance = opts.model() object_data = {} else: self.instance = instance object_data = model_to_dict(instance, opts.fields, opts.exclude) # if initial was provided, it should override the values from instance if initial is not None: object_data.update(initial) # self._validate_unique will be set to True by BaseModelForm.clean(). # It is False by default so overriding self.clean() and failing to call # super will stop validate_unique from being called. self._validate_unique = False super().__init__( data, files, auto_id, prefix, object_data, error_class, label_suffix, empty_permitted, use_required_attribute=use_required_attribute, renderer=renderer, ) for formfield in self.fields.values(): apply_limit_choices_to_to_formfield(formfield) def _get_validation_exclusions(self): """ For backwards-compatibility, exclude several types of fields from model validation. See tickets #12507, #12521, #12553. """ exclude = [] # Build up a list of fields that should be excluded from model field # validation and unique checks. for f in self.instance._meta.fields: field = f.name # Exclude fields that aren't on the form. The developer may be # adding these values to the model after form validation. if field not in self.fields: exclude.append(f.name) # Don't perform model validation on fields that were defined # manually on the form and excluded via the ModelForm's Meta # class. See #12901. elif self._meta.fields and field not in self._meta.fields: exclude.append(f.name) elif self._meta.exclude and field in self._meta.exclude: exclude.append(f.name) # Exclude fields that failed form validation. There's no need for # the model fields to validate them as well. elif field in self._errors: exclude.append(f.name) # Exclude empty fields that are not required by the form, if the # underlying model field is required. This keeps the model field # from raising a required error. Note: don't exclude the field from # validation if the model field allows blanks. If it does, the blank # value may be included in a unique check, so cannot be excluded # from validation. 
else: form_field = self.fields[field] field_value = self.cleaned_data.get(field) if not f.blank and not form_field.required and field_value in form_field.empty_values: exclude.append(f.name) return exclude def clean(self): self._validate_unique = True return self.cleaned_data def _update_errors(self, errors): # Override any validation error messages defined at the model level # with those defined at the form level. opts = self._meta # Allow the model generated by construct_instance() to raise # ValidationError and have them handled in the same way as others. if hasattr(errors, 'error_dict'): error_dict = errors.error_dict else: error_dict = {NON_FIELD_ERRORS: errors} for field, messages in error_dict.items(): if (field == NON_FIELD_ERRORS and opts.error_messages and NON_FIELD_ERRORS in opts.error_messages): error_messages = opts.error_messages[NON_FIELD_ERRORS] elif field in self.fields: error_messages = self.fields[field].error_messages else: continue for message in messages: if (isinstance(message, ValidationError) and message.code in error_messages): message.message = error_messages[message.code] self.add_error(None, errors) def _post_clean(self): opts = self._meta exclude = self._get_validation_exclusions() # Foreign Keys being used to represent inline relationships # are excluded from basic field value validation. This is for two # reasons: firstly, the value may not be supplied (#12507; the # case of providing new values to the admin); secondly the # object being referred to may not yet fully exist (#12749). # However, these fields *must* be included in uniqueness checks, # so this can't be part of _get_validation_exclusions(). for name, field in self.fields.items(): if isinstance(field, InlineForeignKeyField): exclude.append(name) try: self.instance = construct_instance(self, self.instance, opts.fields, opts.exclude) except ValidationError as e: self._update_errors(e) try: self.instance.full_clean(exclude=exclude, validate_unique=False) except ValidationError as e: self._update_errors(e) # Validate uniqueness if needed. if self._validate_unique: self.validate_unique() def validate_unique(self): """ Call the instance's validate_unique() method and update the form's validation errors if any were raised. """ exclude = self._get_validation_exclusions() try: self.instance.validate_unique(exclude=exclude) except ValidationError as e: self._update_errors(e) def _save_m2m(self): """ Save the many-to-many fields and generic relations for this form. """ cleaned_data = self.cleaned_data exclude = self._meta.exclude fields = self._meta.fields opts = self.instance._meta # Note that for historical reasons we want to include also # private_fields here. (GenericRelation was previously a fake # m2m field). for f in chain(opts.many_to_many, opts.private_fields): if not hasattr(f, 'save_form_data'): continue if fields and f.name not in fields: continue if exclude and f.name in exclude: continue if f.name in cleaned_data: f.save_form_data(self.instance, cleaned_data[f.name]) def save(self, commit=True): """ Save this form's self.instance object if commit=True. Otherwise, add a save_m2m() method to the form which can be called after the instance is saved manually at a later time. Return the model instance. """ if self.errors: raise ValueError( "The %s could not be %s because the data didn't validate." % ( self.instance._meta.object_name, 'created' if self.instance._state.adding else 'changed', ) ) if commit: # If committing, save the instance and the m2m data immediately. 
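            # Illustrative view-level usage of both branches (AuthorForm and
            # request are hypothetical, for sketching only):
            #
            #     form.save()                     # this branch: writes row + m2m
            #     author = form.save(commit=False)
            #     author.owner = request.user     # set fields not on the form
            #     author.save()
            #     form.save_m2m()                 # added by the else branch below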
self.instance.save() self._save_m2m() else: # If not committing, add a method to the form to allow deferred # saving of m2m data. self.save_m2m = self._save_m2m return self.instance save.alters_data = True class ModelForm(BaseModelForm, metaclass=ModelFormMetaclass): pass def modelform_factory(model, form=ModelForm, fields=None, exclude=None, formfield_callback=None, widgets=None, localized_fields=None, labels=None, help_texts=None, error_messages=None, field_classes=None): """ Return a ModelForm containing form fields for the given model. You can optionally pass a `form` argument to use as a starting point for constructing the ModelForm. ``fields`` is an optional list of field names. If provided, include only the named fields in the returned fields. If omitted or '__all__', use all fields. ``exclude`` is an optional list of field names. If provided, exclude the named fields from the returned fields, even if they are listed in the ``fields`` argument. ``widgets`` is a dictionary of model field names mapped to a widget. ``localized_fields`` is a list of names of fields which should be localized. ``formfield_callback`` is a callable that takes a model field and returns a form field. ``labels`` is a dictionary of model field names mapped to a label. ``help_texts`` is a dictionary of model field names mapped to a help text. ``error_messages`` is a dictionary of model field names mapped to a dictionary of error messages. ``field_classes`` is a dictionary of model field names mapped to a form field class. """ # Create the inner Meta class. FIXME: ideally, we should be able to # construct a ModelForm without creating and passing in a temporary # inner class. # Build up a list of attributes that the Meta object will have. attrs = {'model': model} if fields is not None: attrs['fields'] = fields if exclude is not None: attrs['exclude'] = exclude if widgets is not None: attrs['widgets'] = widgets if localized_fields is not None: attrs['localized_fields'] = localized_fields if labels is not None: attrs['labels'] = labels if help_texts is not None: attrs['help_texts'] = help_texts if error_messages is not None: attrs['error_messages'] = error_messages if field_classes is not None: attrs['field_classes'] = field_classes # If parent form class already has an inner Meta, the Meta we're # creating needs to inherit from the parent's inner meta. bases = (form.Meta,) if hasattr(form, 'Meta') else () Meta = type('Meta', bases, attrs) if formfield_callback: Meta.formfield_callback = staticmethod(formfield_callback) # Give this new form class a reasonable name. class_name = model.__name__ + 'Form' # Class attributes for the new form class. form_class_attrs = { 'Meta': Meta, 'formfield_callback': formfield_callback } if (getattr(Meta, 'fields', None) is None and getattr(Meta, 'exclude', None) is None): raise ImproperlyConfigured( "Calling modelform_factory without defining 'fields' or " "'exclude' explicitly is prohibited." ) # Instantiate type(form) in order to use the same metaclass as form. return type(form)(class_name, (form,), form_class_attrs) # ModelFormSets ############################################################## class BaseModelFormSet(BaseFormSet): """ A ``FormSet`` for editing a queryset and/or adding new objects to it. """ model = None # Set of fields that must be unique among forms of this set. 
unique_fields = set() def __init__(self, data=None, files=None, auto_id='id_%s', prefix=None, queryset=None, *, initial=None, **kwargs): self.queryset = queryset self.initial_extra = initial super().__init__(**{'data': data, 'files': files, 'auto_id': auto_id, 'prefix': prefix, **kwargs}) def initial_form_count(self): """Return the number of forms that are required in this FormSet.""" if not self.is_bound: return len(self.get_queryset()) return super().initial_form_count() def _existing_object(self, pk): if not hasattr(self, '_object_dict'): self._object_dict = {o.pk: o for o in self.get_queryset()} return self._object_dict.get(pk) def _get_to_python(self, field): """ If the field is a related field, fetch the concrete field's (that is, the ultimate pointed-to field's) to_python. """ while field.remote_field is not None: field = field.remote_field.get_related_field() return field.to_python def _construct_form(self, i, **kwargs): pk_required = i < self.initial_form_count() if pk_required: if self.is_bound: pk_key = '%s-%s' % (self.add_prefix(i), self.model._meta.pk.name) try: pk = self.data[pk_key] except KeyError: # The primary key is missing. The user may have tampered # with POST data. pass else: to_python = self._get_to_python(self.model._meta.pk) try: pk = to_python(pk) except ValidationError: # The primary key exists but is an invalid value. The # user may have tampered with POST data. pass else: kwargs['instance'] = self._existing_object(pk) else: kwargs['instance'] = self.get_queryset()[i] elif self.initial_extra: # Set initial values for extra forms try: kwargs['initial'] = self.initial_extra[i - self.initial_form_count()] except IndexError: pass form = super()._construct_form(i, **kwargs) if pk_required: form.fields[self.model._meta.pk.name].required = True return form def get_queryset(self): if not hasattr(self, '_queryset'): if self.queryset is not None: qs = self.queryset else: qs = self.model._default_manager.get_queryset() # If the queryset isn't already ordered we need to add an # artificial ordering here to make sure that all formsets # constructed from this queryset have the same form order. if not qs.ordered: qs = qs.order_by(self.model._meta.pk.name) # Removed queryset limiting here. As per discussion re: #13023 # on django-dev, max_num should not prevent existing # related objects/inlines from being displayed. self._queryset = qs return self._queryset def save_new(self, form, commit=True): """Save and return a new model instance for the given form.""" return form.save(commit=commit) def save_existing(self, form, instance, commit=True): """Save and return an existing model instance for the given form.""" return form.save(commit=commit) def delete_existing(self, obj, commit=True): """Deletes an existing model instance.""" if commit: obj.delete() def save(self, commit=True): """ Save model instances for every form, adding and changing instances as necessary, and return the list of instances. """ if not commit: self.saved_forms = [] def save_m2m(): for form in self.saved_forms: form.save_m2m() self.save_m2m = save_m2m return self.save_existing_objects(commit) + self.save_new_objects(commit) save.alters_data = True def clean(self): self.validate_unique() def validate_unique(self): # Collect unique_checks and date_checks to run from all the forms. 
all_unique_checks = set() all_date_checks = set() forms_to_delete = self.deleted_forms valid_forms = [form for form in self.forms if form.is_valid() and form not in forms_to_delete] for form in valid_forms: exclude = form._get_validation_exclusions() unique_checks, date_checks = form.instance._get_unique_checks(exclude=exclude) all_unique_checks.update(unique_checks) all_date_checks.update(date_checks) errors = [] # Do each of the unique checks (unique and unique_together) for uclass, unique_check in all_unique_checks: seen_data = set() for form in valid_forms: # Get the data for the set of fields that must be unique among the forms. row_data = ( field if field in self.unique_fields else form.cleaned_data[field] for field in unique_check if field in form.cleaned_data ) # Reduce Model instances to their primary key values row_data = tuple( d._get_pk_val() if hasattr(d, '_get_pk_val') # Prevent "unhashable type: list" errors later on. else tuple(d) if isinstance(d, list) else d for d in row_data ) if row_data and None not in row_data: # if we've already seen it then we have a uniqueness failure if row_data in seen_data: # poke error messages into the right places and mark # the form as invalid errors.append(self.get_unique_error_message(unique_check)) form._errors[NON_FIELD_ERRORS] = self.error_class([self.get_form_error()]) # remove the data from the cleaned_data dict since it was invalid for field in unique_check: if field in form.cleaned_data: del form.cleaned_data[field] # mark the data as seen seen_data.add(row_data) # iterate over each of the date checks now for date_check in all_date_checks: seen_data = set() uclass, lookup, field, unique_for = date_check for form in valid_forms: # see if we have data for both fields if (form.cleaned_data and form.cleaned_data[field] is not None and form.cleaned_data[unique_for] is not None): # if it's a date lookup we need to get the data for all the fields if lookup == 'date': date = form.cleaned_data[unique_for] date_data = (date.year, date.month, date.day) # otherwise it's just the attribute on the date/datetime # object else: date_data = (getattr(form.cleaned_data[unique_for], lookup),) data = (form.cleaned_data[field],) + date_data # if we've already seen it then we have a uniqueness failure if data in seen_data: # poke error messages into the right places and mark # the form as invalid errors.append(self.get_date_error_message(date_check)) form._errors[NON_FIELD_ERRORS] = self.error_class([self.get_form_error()]) # remove the data from the cleaned_data dict since it was invalid del form.cleaned_data[field] # mark the data as seen seen_data.add(data) if errors: raise ValidationError(errors) def get_unique_error_message(self, unique_check): if len(unique_check) == 1: return gettext("Please correct the duplicate data for %(field)s.") % { "field": unique_check[0], } else: return gettext("Please correct the duplicate data for %(field)s, which must be unique.") % { "field": get_text_list(unique_check, _("and")), } def get_date_error_message(self, date_check): return gettext( "Please correct the duplicate data for %(field_name)s " "which must be unique for the %(lookup)s in %(date_field)s." 
) % { 'field_name': date_check[2], 'date_field': date_check[3], 'lookup': str(date_check[1]), } def get_form_error(self): return gettext("Please correct the duplicate values below.") def save_existing_objects(self, commit=True): self.changed_objects = [] self.deleted_objects = [] if not self.initial_forms: return [] saved_instances = [] forms_to_delete = self.deleted_forms for form in self.initial_forms: obj = form.instance # If the pk is None, it means either: # 1. The object is an unexpected empty model, created by invalid # POST data such as an object outside the formset's queryset. # 2. The object was already deleted from the database. if obj.pk is None: continue if form in forms_to_delete: self.deleted_objects.append(obj) self.delete_existing(obj, commit=commit) elif form.has_changed(): self.changed_objects.append((obj, form.changed_data)) saved_instances.append(self.save_existing(form, obj, commit=commit)) if not commit: self.saved_forms.append(form) return saved_instances def save_new_objects(self, commit=True): self.new_objects = [] for form in self.extra_forms: if not form.has_changed(): continue # If someone has marked an add form for deletion, don't save the # object. if self.can_delete and self._should_delete_form(form): continue self.new_objects.append(self.save_new(form, commit=commit)) if not commit: self.saved_forms.append(form) return self.new_objects def add_fields(self, form, index): """Add a hidden field for the object's primary key.""" from django.db.models import AutoField, OneToOneField, ForeignKey self._pk_field = pk = self.model._meta.pk # If a pk isn't editable, then it won't be on the form, so we need to # add it here so we can tell which object is which when we get the # data back. Generally, pk.editable should be false, but for some # reason, auto_created pk fields and AutoField's editable attribute is # True, so check for that as well. def pk_is_not_editable(pk): return ( (not pk.editable) or (pk.auto_created or isinstance(pk, AutoField)) or ( pk.remote_field and pk.remote_field.parent_link and pk_is_not_editable(pk.remote_field.model._meta.pk) ) ) if pk_is_not_editable(pk) or pk.name not in form.fields: if form.is_bound: # If we're adding the related instance, ignore its primary key # as it could be an auto-generated default which isn't actually # in the database. 
pk_value = None if form.instance._state.adding else form.instance.pk else: try: if index is not None: pk_value = self.get_queryset()[index].pk else: pk_value = None except IndexError: pk_value = None if isinstance(pk, (ForeignKey, OneToOneField)): qs = pk.remote_field.model._default_manager.get_queryset() else: qs = self.model._default_manager.get_queryset() qs = qs.using(form.instance._state.db) if form._meta.widgets: widget = form._meta.widgets.get(self._pk_field.name, HiddenInput) else: widget = HiddenInput form.fields[self._pk_field.name] = ModelChoiceField(qs, initial=pk_value, required=False, widget=widget) super().add_fields(form, index) def modelformset_factory(model, form=ModelForm, formfield_callback=None, formset=BaseModelFormSet, extra=1, can_delete=False, can_order=False, max_num=None, fields=None, exclude=None, widgets=None, validate_max=False, localized_fields=None, labels=None, help_texts=None, error_messages=None, min_num=None, validate_min=False, field_classes=None): """Return a FormSet class for the given Django model class.""" meta = getattr(form, 'Meta', None) if (getattr(meta, 'fields', fields) is None and getattr(meta, 'exclude', exclude) is None): raise ImproperlyConfigured( "Calling modelformset_factory without defining 'fields' or " "'exclude' explicitly is prohibited." ) form = modelform_factory(model, form=form, fields=fields, exclude=exclude, formfield_callback=formfield_callback, widgets=widgets, localized_fields=localized_fields, labels=labels, help_texts=help_texts, error_messages=error_messages, field_classes=field_classes) FormSet = formset_factory(form, formset, extra=extra, min_num=min_num, max_num=max_num, can_order=can_order, can_delete=can_delete, validate_min=validate_min, validate_max=validate_max) FormSet.model = model return FormSet # InlineFormSets ############################################################# class BaseInlineFormSet(BaseModelFormSet): """A formset for child objects related to a parent.""" def __init__(self, data=None, files=None, instance=None, save_as_new=False, prefix=None, queryset=None, **kwargs): if instance is None: self.instance = self.fk.remote_field.model() else: self.instance = instance self.save_as_new = save_as_new if queryset is None: queryset = self.model._default_manager if self.instance.pk is not None: qs = queryset.filter(**{self.fk.name: self.instance}) else: qs = queryset.none() self.unique_fields = {self.fk.name} super().__init__(data, files, prefix=prefix, queryset=qs, **kwargs) # Add the generated field to form._meta.fields if it's defined to make # sure validation isn't skipped on that field. if self.form._meta.fields and self.fk.name not in self.form._meta.fields: if isinstance(self.form._meta.fields, tuple): self.form._meta.fields = list(self.form._meta.fields) self.form._meta.fields.append(self.fk.name) def initial_form_count(self): if self.save_as_new: return 0 return super().initial_form_count() def _construct_form(self, i, **kwargs): form = super()._construct_form(i, **kwargs) if self.save_as_new: mutable = getattr(form.data, '_mutable', None) # Allow modifying an immutable QueryDict. if mutable is not None: form.data._mutable = True # Remove the primary key from the form's data, we are only # creating new instances form.data[form.add_prefix(self._pk_field.name)] = None # Remove the foreign key from the form's data form.data[form.add_prefix(self.fk.name)] = None if mutable is not None: form.data._mutable = mutable # Set the fk value here so that the form can do its validation. 
fk_value = self.instance.pk if self.fk.remote_field.field_name != self.fk.remote_field.model._meta.pk.name: fk_value = getattr(self.instance, self.fk.remote_field.field_name) fk_value = getattr(fk_value, 'pk', fk_value) setattr(form.instance, self.fk.get_attname(), fk_value) return form @classmethod def get_default_prefix(cls): return cls.fk.remote_field.get_accessor_name(model=cls.model).replace('+', '') def save_new(self, form, commit=True): # Ensure the latest copy of the related instance is present on each # form (it may have been saved after the formset was originally # instantiated). setattr(form.instance, self.fk.name, self.instance) return super().save_new(form, commit=commit) def add_fields(self, form, index): super().add_fields(form, index) if self._pk_field == self.fk: name = self._pk_field.name kwargs = {'pk_field': True} else: # The foreign key field might not be on the form, so we poke at the # Model field to get the label, since we need that for error messages. name = self.fk.name kwargs = { 'label': getattr(form.fields.get(name), 'label', capfirst(self.fk.verbose_name)) } # The InlineForeignKeyField assumes that the foreign key relation is # based on the parent model's pk. If this isn't the case, set to_field # to correctly resolve the initial form value. if self.fk.remote_field.field_name != self.fk.remote_field.model._meta.pk.name: kwargs['to_field'] = self.fk.remote_field.field_name # If we're adding a new object, ignore a parent's auto-generated key # as it will be regenerated on the save request. if self.instance._state.adding: if kwargs.get('to_field') is not None: to_field = self.instance._meta.get_field(kwargs['to_field']) else: to_field = self.instance._meta.pk if to_field.has_default(): setattr(self.instance, to_field.attname, None) form.fields[name] = InlineForeignKeyField(self.instance, **kwargs) def get_unique_error_message(self, unique_check): unique_check = [field for field in unique_check if field != self.fk.name] return super().get_unique_error_message(unique_check) def _get_foreign_key(parent_model, model, fk_name=None, can_fail=False): """ Find and return the ForeignKey from model to parent if there is one (return None if can_fail is True and no such field exists). If fk_name is provided, assume it is the name of the ForeignKey field. Unless can_fail is True, raise an exception if there isn't a ForeignKey from model to parent_model. """ # avoid circular import from django.db.models import ForeignKey opts = model._meta if fk_name: fks_to_parent = [f for f in opts.fields if f.name == fk_name] if len(fks_to_parent) == 1: fk = fks_to_parent[0] if not isinstance(fk, ForeignKey) or \ (fk.remote_field.model != parent_model and fk.remote_field.model not in parent_model._meta.get_parent_list()): raise ValueError( "fk_name '%s' is not a ForeignKey to '%s'." % (fk_name, parent_model._meta.label) ) elif not fks_to_parent: raise ValueError( "'%s' has no field named '%s'." % (model._meta.label, fk_name) ) else: # Try to discover what the ForeignKey from model to parent_model is fks_to_parent = [ f for f in opts.fields if isinstance(f, ForeignKey) and ( f.remote_field.model == parent_model or f.remote_field.model in parent_model._meta.get_parent_list() ) ] if len(fks_to_parent) == 1: fk = fks_to_parent[0] elif not fks_to_parent: if can_fail: return raise ValueError( "'%s' has no ForeignKey to '%s'." % ( model._meta.label, parent_model._meta.label, ) ) else: raise ValueError( "'%s' has more than one ForeignKey to '%s'. You must specify " "a 'fk_name' attribute." 
        % (
            model._meta.label,
            parent_model._meta.label,
        )
    )
    return fk


def inlineformset_factory(parent_model, model, form=ModelForm,
                          formset=BaseInlineFormSet, fk_name=None,
                          fields=None, exclude=None, extra=3, can_order=False,
                          can_delete=True, max_num=None, formfield_callback=None,
                          widgets=None, validate_max=False, localized_fields=None,
                          labels=None, help_texts=None, error_messages=None,
                          min_num=None, validate_min=False, field_classes=None):
    """
    Return an ``InlineFormSet`` for the given kwargs.

    ``fk_name`` must be provided if ``model`` has more than one ``ForeignKey``
    to ``parent_model``.
    """
    fk = _get_foreign_key(parent_model, model, fk_name=fk_name)
    # enforce a max_num=1 when the foreign key to the parent model is unique.
    if fk.unique:
        max_num = 1
    kwargs = {
        'form': form,
        'formfield_callback': formfield_callback,
        'formset': formset,
        'extra': extra,
        'can_delete': can_delete,
        'can_order': can_order,
        'fields': fields,
        'exclude': exclude,
        'min_num': min_num,
        'max_num': max_num,
        'widgets': widgets,
        'validate_min': validate_min,
        'validate_max': validate_max,
        'localized_fields': localized_fields,
        'labels': labels,
        'help_texts': help_texts,
        'error_messages': error_messages,
        'field_classes': field_classes,
    }
    FormSet = modelformset_factory(model, **kwargs)
    FormSet.fk = fk
    return FormSet


# Fields #####################################################################

class InlineForeignKeyField(Field):
    """
    A basic integer field that deals with validating the given value to a
    given parent instance in an inline.
    """
    widget = HiddenInput
    default_error_messages = {
        'invalid_choice': _('The inline value did not match the parent instance.'),
    }

    def __init__(self, parent_instance, *args, pk_field=False, to_field=None, **kwargs):
        self.parent_instance = parent_instance
        self.pk_field = pk_field
        self.to_field = to_field
        if self.parent_instance is not None:
            if self.to_field:
                kwargs["initial"] = getattr(self.parent_instance, self.to_field)
            else:
                kwargs["initial"] = self.parent_instance.pk
        kwargs["required"] = False
        super().__init__(*args, **kwargs)

    def clean(self, value):
        if value in self.empty_values:
            if self.pk_field:
                return None
            # If there is no value, act as we did before.
            return self.parent_instance
        # Ensure that we compare the values as equal types.
        if self.to_field:
            orig = getattr(self.parent_instance, self.to_field)
        else:
            orig = self.parent_instance.pk
        if str(value) != str(orig):
            raise ValidationError(self.error_messages['invalid_choice'], code='invalid_choice')
        return self.parent_instance

    def has_changed(self, initial, data):
        return False


class ModelChoiceIteratorValue:
    def __init__(self, value, instance):
        self.value = value
        self.instance = instance

    def __str__(self):
        return str(self.value)

    def __eq__(self, other):
        if isinstance(other, ModelChoiceIteratorValue):
            other = other.value
        return self.value == other


class ModelChoiceIterator:
    def __init__(self, field):
        self.field = field
        self.queryset = field.queryset

    def __iter__(self):
        if self.field.empty_label is not None:
            yield ("", self.field.empty_label)
        queryset = self.queryset
        # Can't use iterator() when queryset uses prefetch_related()
        if not queryset._prefetch_related_lookups:
            queryset = queryset.iterator()
        for obj in queryset:
            yield self.choice(obj)

    def __len__(self):
        # count() adds a query but uses less memory since the QuerySet results
        # won't be cached. In most cases, the choices will only be iterated on,
        # and __len__() won't be called.
        return self.queryset.count() + (1 if self.field.empty_label is not None else 0)

    def __bool__(self):
        return self.field.empty_label is not None or self.queryset.exists()

    def choice(self, obj):
        return (
            ModelChoiceIteratorValue(self.field.prepare_value(obj), obj),
            self.field.label_from_instance(obj),
        )


class ModelChoiceField(ChoiceField):
    """A ChoiceField whose choices are a model QuerySet."""
    # This class is a subclass of ChoiceField for purity, but it doesn't
    # actually use any of ChoiceField's implementation.
    default_error_messages = {
        'invalid_choice': _('Select a valid choice. That choice is not one of'
                            ' the available choices.'),
    }
    iterator = ModelChoiceIterator

    def __init__(self, queryset, *, empty_label="---------",
                 required=True, widget=None, label=None, initial=None,
                 help_text='', to_field_name=None, limit_choices_to=None,
                 blank=False, **kwargs):
        # Call Field instead of ChoiceField __init__() because we don't need
        # ChoiceField.__init__().
        Field.__init__(
            self, required=required, widget=widget, label=label,
            initial=initial, help_text=help_text, **kwargs
        )
        if (
            (required and initial is not None) or
            (isinstance(self.widget, RadioSelect) and not blank)
        ):
            self.empty_label = None
        else:
            self.empty_label = empty_label
        self.queryset = queryset
        self.limit_choices_to = limit_choices_to   # limit the queryset later.
        self.to_field_name = to_field_name

    def get_limit_choices_to(self):
        """
        Return ``limit_choices_to`` for this form field.

        If it is a callable, invoke it and return the result.
        """
        if callable(self.limit_choices_to):
            return self.limit_choices_to()
        return self.limit_choices_to

    def __deepcopy__(self, memo):
        result = super(ChoiceField, self).__deepcopy__(memo)
        # Need to force a new ModelChoiceIterator to be created, bug #11183
        if self.queryset is not None:
            result.queryset = self.queryset.all()
        return result

    def _get_queryset(self):
        return self._queryset

    def _set_queryset(self, queryset):
        self._queryset = None if queryset is None else queryset.all()
        self.widget.choices = self.choices

    queryset = property(_get_queryset, _set_queryset)

    # This method will be used to create object labels by the
    # ModelChoiceIterator. Override it to customize the label.
    def label_from_instance(self, obj):
        """
        Convert objects into strings and generate the labels for the choices
        presented by this object. Subclasses can override this method to
        customize the display of the choices.
        """
        return str(obj)

    def _get_choices(self):
        # If self._choices is set, then somebody must have manually set
        # the property self.choices. In this case, just return self._choices.
        if hasattr(self, '_choices'):
            return self._choices

        # Otherwise, execute the QuerySet in self.queryset to determine the
        # choices dynamically. Return a fresh ModelChoiceIterator that has not been
        # consumed. Note that we're instantiating a new ModelChoiceIterator *each*
        # time _get_choices() is called (and, thus, each time self.choices is
        # accessed) so that we can ensure the QuerySet has not been consumed. This
        # construct might look complicated but it allows for lazy evaluation of
        # the queryset.
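        # E.g. (hypothetical field) each of the two accesses below builds a
        # fresh ModelChoiceIterator, so neither consumes the other's results:
        #
        #     field = ModelChoiceField(queryset=Author.objects.all())
        #     list(field.choices)
        #     list(field.choices)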
        return self.iterator(self)

    choices = property(_get_choices, ChoiceField._set_choices)

    def prepare_value(self, value):
        if hasattr(value, '_meta'):
            if self.to_field_name:
                return value.serializable_value(self.to_field_name)
            else:
                return value.pk
        return super().prepare_value(value)

    def to_python(self, value):
        if value in self.empty_values:
            return None
        try:
            key = self.to_field_name or 'pk'
            if isinstance(value, self.queryset.model):
                value = getattr(value, key)
            value = self.queryset.get(**{key: value})
        except (ValueError, TypeError, self.queryset.model.DoesNotExist):
            raise ValidationError(self.error_messages['invalid_choice'], code='invalid_choice')
        return value

    def validate(self, value):
        return Field.validate(self, value)

    def has_changed(self, initial, data):
        if self.disabled:
            return False
        initial_value = initial if initial is not None else ''
        data_value = data if data is not None else ''
        return str(self.prepare_value(initial_value)) != str(data_value)


class ModelMultipleChoiceField(ModelChoiceField):
    """A MultipleChoiceField whose choices are a model QuerySet."""
    widget = SelectMultiple
    hidden_widget = MultipleHiddenInput
    default_error_messages = {
        'invalid_list': _('Enter a list of values.'),
        'invalid_choice': _('Select a valid choice. %(value)s is not one of the'
                            ' available choices.'),
        'invalid_pk_value': _('“%(pk)s” is not a valid value.')
    }

    def __init__(self, queryset, **kwargs):
        super().__init__(queryset, empty_label=None, **kwargs)
        if self.error_messages.get('list') is not None:
            warnings.warn(
                "The 'list' error message key is deprecated in favor of "
                "'invalid_list'.",
                RemovedInDjango40Warning, stacklevel=2,
            )
            self.error_messages['invalid_list'] = self.error_messages['list']

    def to_python(self, value):
        if not value:
            return []
        return list(self._check_values(value))

    def clean(self, value):
        value = self.prepare_value(value)
        if self.required and not value:
            raise ValidationError(self.error_messages['required'], code='required')
        elif not self.required and not value:
            return self.queryset.none()
        if not isinstance(value, (list, tuple)):
            raise ValidationError(
                self.error_messages['invalid_list'],
                code='invalid_list',
            )
        qs = self._check_values(value)
        # Since this overrides the inherited ModelChoiceField.clean
        # we run custom validators here
        self.run_validators(value)
        return qs

    def _check_values(self, value):
        """
        Given a list of possible PK values, return a QuerySet of the
        corresponding objects. Raise a ValidationError if a given value is
        invalid (not a valid PK, not in the queryset, etc.)
        """
        key = self.to_field_name or 'pk'
        # Deduplicate given values to avoid creating many querysets or
        # requiring the database backend to deduplicate efficiently.
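        # For instance, ['1', '2', '1'] collapses to frozenset({'1', '2'}),
        # so the single __in lookup below sees each candidate value once.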
try: value = frozenset(value) except TypeError: # list of lists isn't hashable, for example raise ValidationError( self.error_messages['invalid_list'], code='invalid_list', ) for pk in value: try: self.queryset.filter(**{key: pk}) except (ValueError, TypeError): raise ValidationError( self.error_messages['invalid_pk_value'], code='invalid_pk_value', params={'pk': pk}, ) qs = self.queryset.filter(**{'%s__in' % key: value}) pks = {str(getattr(o, key)) for o in qs} for val in value: if str(val) not in pks: raise ValidationError( self.error_messages['invalid_choice'], code='invalid_choice', params={'value': val}, ) return qs def prepare_value(self, value): if (hasattr(value, '__iter__') and not isinstance(value, str) and not hasattr(value, '_meta')): prepare_value = super().prepare_value return [prepare_value(v) for v in value] return super().prepare_value(value) def has_changed(self, initial, data): if self.disabled: return False if initial is None: initial = [] if data is None: data = [] if len(initial) != len(data): return True initial_set = {str(value) for value in self.prepare_value(initial)} data_set = {str(value) for value in data} return data_set != initial_set def modelform_defines_fields(form_class): return hasattr(form_class, '_meta') and ( form_class._meta.fields is not None or form_class._meta.exclude is not None )
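

# An end-to-end sketch of the factories above (editor's illustration, not
# part of Django; Author and Book are hypothetical models where Book has a
# ForeignKey to Author):
#
#     BookFormSet = inlineformset_factory(Author, Book, fields=['title'])
#     author = Author.objects.get(pk=1)
#     formset = BookFormSet(request.POST or None, instance=author)
#     if formset.is_valid():
#         formset.save()  # saves new/changed Books, deletes marked ones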
98ef13553a93c3c6b647d2cf3ae7d68857e6801341880689733a9be7d3d46a5c
""" HTML Widget classes """ import copy import datetime import warnings from collections import defaultdict from itertools import chain from django.conf import settings from django.forms.utils import to_current_timezone from django.templatetags.static import static from django.utils import datetime_safe, formats from django.utils.datastructures import OrderedSet from django.utils.dates import MONTHS from django.utils.formats import get_format from django.utils.html import format_html, html_safe from django.utils.regex_helper import _lazy_re_compile from django.utils.safestring import mark_safe from django.utils.topological_sort import ( CyclicDependencyError, stable_topological_sort, ) from django.utils.translation import gettext_lazy as _ from .renderers import get_default_renderer __all__ = ( 'Media', 'MediaDefiningClass', 'Widget', 'TextInput', 'NumberInput', 'EmailInput', 'URLInput', 'PasswordInput', 'HiddenInput', 'MultipleHiddenInput', 'FileInput', 'ClearableFileInput', 'Textarea', 'DateInput', 'DateTimeInput', 'TimeInput', 'CheckboxInput', 'Select', 'NullBooleanSelect', 'SelectMultiple', 'RadioSelect', 'CheckboxSelectMultiple', 'MultiWidget', 'SplitDateTimeWidget', 'SplitHiddenDateTimeWidget', 'SelectDateWidget', ) MEDIA_TYPES = ('css', 'js') class MediaOrderConflictWarning(RuntimeWarning): pass @html_safe class Media: def __init__(self, media=None, css=None, js=None): if media is not None: css = getattr(media, 'css', {}) js = getattr(media, 'js', []) else: if css is None: css = {} if js is None: js = [] self._css_lists = [css] self._js_lists = [js] def __repr__(self): return 'Media(css=%r, js=%r)' % (self._css, self._js) def __str__(self): return self.render() @property def _css(self): css = defaultdict(list) for css_list in self._css_lists: for medium, sublist in css_list.items(): css[medium].append(sublist) return {medium: self.merge(*lists) for medium, lists in css.items()} @property def _js(self): return self.merge(*self._js_lists) def render(self): return mark_safe('\n'.join(chain.from_iterable(getattr(self, 'render_' + name)() for name in MEDIA_TYPES))) def render_js(self): return [ format_html( '<script src="{}"></script>', self.absolute_path(path) ) for path in self._js ] def render_css(self): # To keep rendering order consistent, we can't just iterate over items(). # We need to sort the keys, and iterate over the sorted list. media = sorted(self._css) return chain.from_iterable([ format_html( '<link href="{}" type="text/css" media="{}" rel="stylesheet">', self.absolute_path(path), medium ) for path in self._css[medium] ] for medium in media) def absolute_path(self, path): """ Given a relative or absolute path to a static asset, return an absolute path. An absolute path will be returned unchanged while a relative path will be passed to django.templatetags.static.static(). """ if path.startswith(('http://', 'https://', '/')): return path return static(path) def __getitem__(self, name): """Return a Media object that only contains media of the given type.""" if name in MEDIA_TYPES: return Media(**{str(name): getattr(self, '_' + name)}) raise KeyError('Unknown media type "%s"' % name) @staticmethod def merge(*lists): """ Merge lists while trying to keep the relative order of the elements. Warn if the lists have the same elements in a different relative order. For static assets it can be important to have them included in the DOM in a certain order. In JavaScript you may not be able to reference a global or in CSS you might want to override a style. 
""" dependency_graph = defaultdict(set) all_items = OrderedSet() for list_ in filter(None, lists): head = list_[0] # The first items depend on nothing but have to be part of the # dependency graph to be included in the result. dependency_graph.setdefault(head, set()) for item in list_: all_items.add(item) # No self dependencies if head != item: dependency_graph[item].add(head) head = item try: return stable_topological_sort(all_items, dependency_graph) except CyclicDependencyError: warnings.warn( 'Detected duplicate Media files in an opposite order: {}'.format( ', '.join(repr(l) for l in lists) ), MediaOrderConflictWarning, ) return list(all_items) def __add__(self, other): combined = Media() combined._css_lists = self._css_lists + other._css_lists combined._js_lists = self._js_lists + other._js_lists return combined def media_property(cls): def _media(self): # Get the media property of the superclass, if it exists sup_cls = super(cls, self) try: base = sup_cls.media except AttributeError: base = Media() # Get the media definition for this class definition = getattr(cls, 'Media', None) if definition: extend = getattr(definition, 'extend', True) if extend: if extend is True: m = base else: m = Media() for medium in extend: m = m + base[medium] return m + Media(definition) return Media(definition) return base return property(_media) class MediaDefiningClass(type): """ Metaclass for classes that can have media definitions. """ def __new__(mcs, name, bases, attrs): new_class = super().__new__(mcs, name, bases, attrs) if 'media' not in attrs: new_class.media = media_property(new_class) return new_class class Widget(metaclass=MediaDefiningClass): needs_multipart_form = False # Determines does this widget need multipart form is_localized = False is_required = False supports_microseconds = True def __init__(self, attrs=None): self.attrs = {} if attrs is None else attrs.copy() def __deepcopy__(self, memo): obj = copy.copy(self) obj.attrs = self.attrs.copy() memo[id(self)] = obj return obj @property def is_hidden(self): return self.input_type == 'hidden' if hasattr(self, 'input_type') else False def subwidgets(self, name, value, attrs=None): context = self.get_context(name, value, attrs) yield context['widget'] def format_value(self, value): """ Return a value as it should appear when rendered in a template. """ if value == '' or value is None: return None if self.is_localized: return formats.localize_input(value) return str(value) def get_context(self, name, value, attrs): return { 'widget': { 'name': name, 'is_hidden': self.is_hidden, 'required': self.is_required, 'value': self.format_value(value), 'attrs': self.build_attrs(self.attrs, attrs), 'template_name': self.template_name, }, } def render(self, name, value, attrs=None, renderer=None): """Render the widget as an HTML string.""" context = self.get_context(name, value, attrs) return self._render(self.template_name, context, renderer) def _render(self, template_name, context, renderer=None): if renderer is None: renderer = get_default_renderer() return mark_safe(renderer.render(template_name, context)) def build_attrs(self, base_attrs, extra_attrs=None): """Build an attribute dictionary.""" return {**base_attrs, **(extra_attrs or {})} def value_from_datadict(self, data, files, name): """ Given a dictionary of data and this widget's name, return the value of this widget or None if it's not provided. 
""" return data.get(name) def value_omitted_from_data(self, data, files, name): return name not in data def id_for_label(self, id_): """ Return the HTML ID attribute of this Widget for use by a <label>, given the ID of the field. Return None if no ID is available. This hook is necessary because some widgets have multiple HTML elements and, thus, multiple IDs. In that case, this method should return an ID value that corresponds to the first ID in the widget's tags. """ return id_ def use_required_attribute(self, initial): return not self.is_hidden class Input(Widget): """ Base class for all <input> widgets. """ input_type = None # Subclasses must define this. template_name = 'django/forms/widgets/input.html' def __init__(self, attrs=None): if attrs is not None: attrs = attrs.copy() self.input_type = attrs.pop('type', self.input_type) super().__init__(attrs) def get_context(self, name, value, attrs): context = super().get_context(name, value, attrs) context['widget']['type'] = self.input_type return context class TextInput(Input): input_type = 'text' template_name = 'django/forms/widgets/text.html' class NumberInput(Input): input_type = 'number' template_name = 'django/forms/widgets/number.html' class EmailInput(Input): input_type = 'email' template_name = 'django/forms/widgets/email.html' class URLInput(Input): input_type = 'url' template_name = 'django/forms/widgets/url.html' class PasswordInput(Input): input_type = 'password' template_name = 'django/forms/widgets/password.html' def __init__(self, attrs=None, render_value=False): super().__init__(attrs) self.render_value = render_value def get_context(self, name, value, attrs): if not self.render_value: value = None return super().get_context(name, value, attrs) class HiddenInput(Input): input_type = 'hidden' template_name = 'django/forms/widgets/hidden.html' class MultipleHiddenInput(HiddenInput): """ Handle <input type="hidden"> for fields that have a list of values. """ template_name = 'django/forms/widgets/multiple_hidden.html' def get_context(self, name, value, attrs): context = super().get_context(name, value, attrs) final_attrs = context['widget']['attrs'] id_ = context['widget']['attrs'].get('id') subwidgets = [] for index, value_ in enumerate(context['widget']['value']): widget_attrs = final_attrs.copy() if id_: # An ID attribute was given. Add a numeric index as a suffix # so that the inputs don't all have the same ID attribute. 
widget_attrs['id'] = '%s_%s' % (id_, index) widget = HiddenInput() widget.is_required = self.is_required subwidgets.append(widget.get_context(name, value_, widget_attrs)['widget']) context['widget']['subwidgets'] = subwidgets return context def value_from_datadict(self, data, files, name): try: getter = data.getlist except AttributeError: getter = data.get return getter(name) def format_value(self, value): return [] if value is None else value class FileInput(Input): input_type = 'file' needs_multipart_form = True template_name = 'django/forms/widgets/file.html' def format_value(self, value): """File input never renders a value.""" return def value_from_datadict(self, data, files, name): "File widgets take data from FILES, not POST" return files.get(name) def value_omitted_from_data(self, data, files, name): return name not in files def use_required_attribute(self, initial): return super().use_required_attribute(initial) and not initial FILE_INPUT_CONTRADICTION = object() class ClearableFileInput(FileInput): clear_checkbox_label = _('Clear') initial_text = _('Currently') input_text = _('Change') template_name = 'django/forms/widgets/clearable_file_input.html' def clear_checkbox_name(self, name): """ Given the name of the file input, return the name of the clear checkbox input. """ return name + '-clear' def clear_checkbox_id(self, name): """ Given the name of the clear checkbox input, return the HTML id for it. """ return name + '_id' def is_initial(self, value): """ Return whether value is considered to be initial value. """ return bool(value and getattr(value, 'url', False)) def format_value(self, value): """ Return the file object if it has a defined url attribute. """ if self.is_initial(value): return value def get_context(self, name, value, attrs): context = super().get_context(name, value, attrs) checkbox_name = self.clear_checkbox_name(name) checkbox_id = self.clear_checkbox_id(checkbox_name) context['widget'].update({ 'checkbox_name': checkbox_name, 'checkbox_id': checkbox_id, 'is_initial': self.is_initial(value), 'input_text': self.input_text, 'initial_text': self.initial_text, 'clear_checkbox_label': self.clear_checkbox_label, }) return context def value_from_datadict(self, data, files, name): upload = super().value_from_datadict(data, files, name) if not self.is_required and CheckboxInput().value_from_datadict( data, files, self.clear_checkbox_name(name)): if upload: # If the user contradicts themselves (uploads a new file AND # checks the "clear" checkbox), we return a unique marker # object that FileField will turn into a ValidationError. 
return FILE_INPUT_CONTRADICTION # False signals to clear any existing value, as opposed to just None return False return upload def value_omitted_from_data(self, data, files, name): return ( super().value_omitted_from_data(data, files, name) and self.clear_checkbox_name(name) not in data ) class Textarea(Widget): template_name = 'django/forms/widgets/textarea.html' def __init__(self, attrs=None): # Use slightly better defaults than HTML's 20x2 box default_attrs = {'cols': '40', 'rows': '10'} if attrs: default_attrs.update(attrs) super().__init__(default_attrs) class DateTimeBaseInput(TextInput): format_key = '' supports_microseconds = False def __init__(self, attrs=None, format=None): super().__init__(attrs) self.format = format or None def format_value(self, value): return formats.localize_input(value, self.format or formats.get_format(self.format_key)[0]) class DateInput(DateTimeBaseInput): format_key = 'DATE_INPUT_FORMATS' template_name = 'django/forms/widgets/date.html' class DateTimeInput(DateTimeBaseInput): format_key = 'DATETIME_INPUT_FORMATS' template_name = 'django/forms/widgets/datetime.html' class TimeInput(DateTimeBaseInput): format_key = 'TIME_INPUT_FORMATS' template_name = 'django/forms/widgets/time.html' # Defined at module level so that CheckboxInput is picklable (#17976) def boolean_check(v): return not (v is False or v is None or v == '') class CheckboxInput(Input): input_type = 'checkbox' template_name = 'django/forms/widgets/checkbox.html' def __init__(self, attrs=None, check_test=None): super().__init__(attrs) # check_test is a callable that takes a value and returns True # if the checkbox should be checked for that value. self.check_test = boolean_check if check_test is None else check_test def format_value(self, value): """Only return the 'value' attribute if value isn't empty.""" if value is True or value is False or value is None or value == '': return return str(value) def get_context(self, name, value, attrs): if self.check_test(value): attrs = {**(attrs or {}), 'checked': True} return super().get_context(name, value, attrs) def value_from_datadict(self, data, files, name): if name not in data: # A missing value means False because HTML form submission does not # send results for unselected checkboxes. return False value = data.get(name) # Translate true and false strings to boolean values. values = {'true': True, 'false': False} if isinstance(value, str): value = values.get(value.lower(), value) return bool(value) def value_omitted_from_data(self, data, files, name): # HTML checkboxes don't appear in POST data if not checked, so it's # never known if the value is actually omitted. return False class ChoiceWidget(Widget): allow_multiple_selected = False input_type = None template_name = None option_template_name = None add_id_index = True checked_attribute = {'checked': True} option_inherits_attrs = True def __init__(self, attrs=None, choices=()): super().__init__(attrs) # choices can be any iterable, but we may need to render this widget # multiple times. Thus, collapse it into a list so it can be consumed # more than once. self.choices = list(choices) def __deepcopy__(self, memo): obj = copy.copy(self) obj.attrs = self.attrs.copy() obj.choices = copy.copy(self.choices) memo[id(self)] = obj return obj def subwidgets(self, name, value, attrs=None): """ Yield all "subwidgets" of this widget. Used to enable iterating options from a BoundField for choice widgets. 
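
        E.g. for a RadioSelect-backed field named 'color', this is what lets
        a template iterate the individual radio buttons (a sketch)::

            {% for radio in form.color %}{{ radio.tag }}{% endfor %}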
""" value = self.format_value(value) yield from self.options(name, value, attrs) def options(self, name, value, attrs=None): """Yield a flat list of options for this widgets.""" for group in self.optgroups(name, value, attrs): yield from group[1] def optgroups(self, name, value, attrs=None): """Return a list of optgroups for this widget.""" groups = [] has_selected = False for index, (option_value, option_label) in enumerate(self.choices): if option_value is None: option_value = '' subgroup = [] if isinstance(option_label, (list, tuple)): group_name = option_value subindex = 0 choices = option_label else: group_name = None subindex = None choices = [(option_value, option_label)] groups.append((group_name, subgroup, index)) for subvalue, sublabel in choices: selected = ( str(subvalue) in value and (not has_selected or self.allow_multiple_selected) ) has_selected |= selected subgroup.append(self.create_option( name, subvalue, sublabel, selected, index, subindex=subindex, attrs=attrs, )) if subindex is not None: subindex += 1 return groups def create_option(self, name, value, label, selected, index, subindex=None, attrs=None): index = str(index) if subindex is None else "%s_%s" % (index, subindex) if attrs is None: attrs = {} option_attrs = self.build_attrs(self.attrs, attrs) if self.option_inherits_attrs else {} if selected: option_attrs.update(self.checked_attribute) if 'id' in option_attrs: option_attrs['id'] = self.id_for_label(option_attrs['id'], index) return { 'name': name, 'value': value, 'label': label, 'selected': selected, 'index': index, 'attrs': option_attrs, 'type': self.input_type, 'template_name': self.option_template_name, 'wrap_label': True, } def get_context(self, name, value, attrs): context = super().get_context(name, value, attrs) context['widget']['optgroups'] = self.optgroups(name, context['widget']['value'], attrs) return context def id_for_label(self, id_, index='0'): """ Use an incremented id for each option where the main widget references the zero index. """ if id_ and self.add_id_index: id_ = '%s_%s' % (id_, index) return id_ def value_from_datadict(self, data, files, name): getter = data.get if self.allow_multiple_selected: try: getter = data.getlist except AttributeError: pass return getter(name) def format_value(self, value): """Return selected values as a list.""" if value is None and self.allow_multiple_selected: return [] if not isinstance(value, (tuple, list)): value = [value] return [str(v) if v is not None else '' for v in value] class Select(ChoiceWidget): input_type = 'select' template_name = 'django/forms/widgets/select.html' option_template_name = 'django/forms/widgets/select_option.html' add_id_index = False checked_attribute = {'selected': True} option_inherits_attrs = False def get_context(self, name, value, attrs): context = super().get_context(name, value, attrs) if self.allow_multiple_selected: context['widget']['attrs']['multiple'] = True return context @staticmethod def _choice_has_empty_value(choice): """Return True if the choice's value is empty string or None.""" value, _ = choice return value is None or value == '' def use_required_attribute(self, initial): """ Don't render 'required' if the first <option> has a value, as that's invalid HTML. """ use_required_attribute = super().use_required_attribute(initial) # 'required' is always okay for <select multiple>. 
        if self.allow_multiple_selected:
            return use_required_attribute
        first_choice = next(iter(self.choices), None)
        return use_required_attribute and first_choice is not None and self._choice_has_empty_value(first_choice)


class NullBooleanSelect(Select):
    """
    A Select Widget intended to be used with NullBooleanField.
    """
    def __init__(self, attrs=None):
        choices = (
            ('unknown', _('Unknown')),
            ('true', _('Yes')),
            ('false', _('No')),
        )
        super().__init__(attrs, choices)

    def format_value(self, value):
        try:
            return {
                True: 'true', False: 'false',
                'true': 'true', 'false': 'false',
                # For backwards compatibility with Django < 2.2.
                '2': 'true',
                '3': 'false',
            }[value]
        except KeyError:
            return 'unknown'

    def value_from_datadict(self, data, files, name):
        value = data.get(name)
        return {
            True: True,
            'True': True,
            'False': False,
            False: False,
            'true': True,
            'false': False,
            # For backwards compatibility with Django < 2.2.
            '2': True,
            '3': False,
        }.get(value)


class SelectMultiple(Select):
    allow_multiple_selected = True

    def value_from_datadict(self, data, files, name):
        try:
            getter = data.getlist
        except AttributeError:
            getter = data.get
        return getter(name)

    def value_omitted_from_data(self, data, files, name):
        # An unselected <select multiple> doesn't appear in POST data, so it's
        # never known if the value is actually omitted.
        return False


class RadioSelect(ChoiceWidget):
    input_type = 'radio'
    template_name = 'django/forms/widgets/radio.html'
    option_template_name = 'django/forms/widgets/radio_option.html'


class CheckboxSelectMultiple(ChoiceWidget):
    allow_multiple_selected = True
    input_type = 'checkbox'
    template_name = 'django/forms/widgets/checkbox_select.html'
    option_template_name = 'django/forms/widgets/checkbox_option.html'

    def use_required_attribute(self, initial):
        # Don't use the 'required' attribute because browser validation would
        # require all checkboxes to be checked instead of at least one.
        return False

    def value_omitted_from_data(self, data, files, name):
        # HTML checkboxes don't appear in POST data if not checked, so it's
        # never known if the value is actually omitted.
        return False

    def id_for_label(self, id_, index=None):
        """
        Don't include for="field_0" in <label> because clicking such a label
        would toggle the first checkbox.
        """
        if index is None:
            return ''
        return super().id_for_label(id_, index)


class MultiWidget(Widget):
    """
    A widget that is composed of multiple widgets.

    In addition to the values added by Widget.get_context(), this widget
    adds a list of subwidgets to the context as widget['subwidgets'].
    These can be looped over and rendered like normal widgets.

    You'll probably want to use this class with MultiValueField.
    """
    template_name = 'django/forms/widgets/multiwidget.html'

    def __init__(self, widgets, attrs=None):
        if isinstance(widgets, dict):
            self.widgets_names = [
                ('_%s' % name) if name else '' for name in widgets
            ]
            widgets = widgets.values()
        else:
            self.widgets_names = ['_%s' % i for i in range(len(widgets))]
        self.widgets = [w() if isinstance(w, type) else w for w in widgets]
        super().__init__(attrs)

    @property
    def is_hidden(self):
        return all(w.is_hidden for w in self.widgets)

    def get_context(self, name, value, attrs):
        context = super().get_context(name, value, attrs)
        if self.is_localized:
            for widget in self.widgets:
                widget.is_localized = self.is_localized
        # value is a list of values, each corresponding to a widget
        # in self.widgets.
if not isinstance(value, list): value = self.decompress(value) final_attrs = context['widget']['attrs'] input_type = final_attrs.pop('type', None) id_ = final_attrs.get('id') subwidgets = [] for i, (widget_name, widget) in enumerate(zip(self.widgets_names, self.widgets)): if input_type is not None: widget.input_type = input_type widget_name = name + widget_name try: widget_value = value[i] except IndexError: widget_value = None if id_: widget_attrs = final_attrs.copy() widget_attrs['id'] = '%s_%s' % (id_, i) else: widget_attrs = final_attrs subwidgets.append(widget.get_context(widget_name, widget_value, widget_attrs)['widget']) context['widget']['subwidgets'] = subwidgets return context def id_for_label(self, id_): if id_: id_ += '_0' return id_ def value_from_datadict(self, data, files, name): return [ widget.value_from_datadict(data, files, name + widget_name) for widget_name, widget in zip(self.widgets_names, self.widgets) ] def value_omitted_from_data(self, data, files, name): return all( widget.value_omitted_from_data(data, files, name + widget_name) for widget_name, widget in zip(self.widgets_names, self.widgets) ) def decompress(self, value): """ Return a list of decompressed values for the given compressed value. The given value can be assumed to be valid, but not necessarily non-empty. """ raise NotImplementedError('Subclasses must implement this method.') def _get_media(self): """ Media for a multiwidget is the combination of all media of the subwidgets. """ media = Media() for w in self.widgets: media = media + w.media return media media = property(_get_media) def __deepcopy__(self, memo): obj = super().__deepcopy__(memo) obj.widgets = copy.deepcopy(self.widgets) return obj @property def needs_multipart_form(self): return any(w.needs_multipart_form for w in self.widgets) class SplitDateTimeWidget(MultiWidget): """ A widget that splits datetime input into two <input type="text"> boxes. """ supports_microseconds = False template_name = 'django/forms/widgets/splitdatetime.html' def __init__(self, attrs=None, date_format=None, time_format=None, date_attrs=None, time_attrs=None): widgets = ( DateInput( attrs=attrs if date_attrs is None else date_attrs, format=date_format, ), TimeInput( attrs=attrs if time_attrs is None else time_attrs, format=time_format, ), ) super().__init__(widgets) def decompress(self, value): if value: value = to_current_timezone(value) return [value.date(), value.time()] return [None, None] class SplitHiddenDateTimeWidget(SplitDateTimeWidget): """ A widget that splits datetime input into two <input type="hidden"> inputs. """ template_name = 'django/forms/widgets/splithiddendatetime.html' def __init__(self, attrs=None, date_format=None, time_format=None, date_attrs=None, time_attrs=None): super().__init__(attrs, date_format, time_format, date_attrs, time_attrs) for widget in self.widgets: widget.input_type = 'hidden' class SelectDateWidget(Widget): """ A widget that splits date input into three <select> boxes. This also serves as an example of a Widget that has more than one HTML element and hence implements value_from_datadict. """ none_value = ('', '---') month_field = '%s_month' day_field = '%s_day' year_field = '%s_year' template_name = 'django/forms/widgets/select_date.html' input_type = 'select' select_widget = Select date_re = _lazy_re_compile(r'(\d{4}|0)-(\d\d?)-(\d\d?)$') def __init__(self, attrs=None, years=None, months=None, empty_label=None): self.attrs = attrs or {} # Optional list or tuple of years to use in the "year" select box. 
if years: self.years = years else: this_year = datetime.date.today().year self.years = range(this_year, this_year + 10) # Optional dict of months to use in the "month" select box. if months: self.months = months else: self.months = MONTHS # Optional string, list, or tuple to use as empty_label. if isinstance(empty_label, (list, tuple)): if not len(empty_label) == 3: raise ValueError('empty_label list/tuple must have 3 elements.') self.year_none_value = ('', empty_label[0]) self.month_none_value = ('', empty_label[1]) self.day_none_value = ('', empty_label[2]) else: if empty_label is not None: self.none_value = ('', empty_label) self.year_none_value = self.none_value self.month_none_value = self.none_value self.day_none_value = self.none_value def get_context(self, name, value, attrs): context = super().get_context(name, value, attrs) date_context = {} year_choices = [(i, str(i)) for i in self.years] if not self.is_required: year_choices.insert(0, self.year_none_value) year_name = self.year_field % name date_context['year'] = self.select_widget(attrs, choices=year_choices).get_context( name=year_name, value=context['widget']['value']['year'], attrs={**context['widget']['attrs'], 'id': 'id_%s' % year_name}, ) month_choices = list(self.months.items()) if not self.is_required: month_choices.insert(0, self.month_none_value) month_name = self.month_field % name date_context['month'] = self.select_widget(attrs, choices=month_choices).get_context( name=month_name, value=context['widget']['value']['month'], attrs={**context['widget']['attrs'], 'id': 'id_%s' % month_name}, ) day_choices = [(i, i) for i in range(1, 32)] if not self.is_required: day_choices.insert(0, self.day_none_value) day_name = self.day_field % name date_context['day'] = self.select_widget(attrs, choices=day_choices,).get_context( name=day_name, value=context['widget']['value']['day'], attrs={**context['widget']['attrs'], 'id': 'id_%s' % day_name}, ) subwidgets = [] for field in self._parse_date_fmt(): subwidgets.append(date_context[field]['widget']) context['widget']['subwidgets'] = subwidgets return context def format_value(self, value): """ Return a dict containing the year, month, and day of the current value. Use dict instead of a datetime to allow invalid dates such as February 31 to display correctly. """ year, month, day = None, None, None if isinstance(value, (datetime.date, datetime.datetime)): year, month, day = value.year, value.month, value.day elif isinstance(value, str): match = self.date_re.match(value) if match: # Convert any zeros in the date to empty strings to match the # empty option value. 
year, month, day = [int(val) or '' for val in match.groups()] elif settings.USE_L10N: input_format = get_format('DATE_INPUT_FORMATS')[0] try: d = datetime.datetime.strptime(value, input_format) except ValueError: pass else: year, month, day = d.year, d.month, d.day return {'year': year, 'month': month, 'day': day} @staticmethod def _parse_date_fmt(): fmt = get_format('DATE_FORMAT') escaped = False for char in fmt: if escaped: escaped = False elif char == '\\': escaped = True elif char in 'Yy': yield 'year' elif char in 'bEFMmNn': yield 'month' elif char in 'dj': yield 'day' def id_for_label(self, id_): for first_select in self._parse_date_fmt(): return '%s_%s' % (id_, first_select) return '%s_month' % id_ def value_from_datadict(self, data, files, name): y = data.get(self.year_field % name) m = data.get(self.month_field % name) d = data.get(self.day_field % name) if y == m == d == '': return None if y is not None and m is not None and d is not None: input_format = get_format('DATE_INPUT_FORMATS')[0] try: date_value = datetime.date(int(y), int(m), int(d)) except ValueError: # Return pseudo-ISO dates with zeros for any unselected values, # e.g. '2017-0-23'. return '%s-%s-%s' % (y or 0, m or 0, d or 0) date_value = datetime_safe.new_date(date_value) return date_value.strftime(input_format) return data.get(name) def value_omitted_from_data(self, data, files, name): return not any( ('{}_{}'.format(name, interval) in data) for interval in ('year', 'month', 'day') )
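

# ---------------------------------------------------------------------------
# Illustrative usage sketch, not part of Django itself: a quick tour of the
# MultiWidget contract defined above (decompress() splits one value into
# per-subwidget values; value_from_datadict() gathers the posted pieces back
# into a list). The field name 'when' and the sample data are hypothetical,
# and a minimally configured settings module is assumed.
if __name__ == '__main__':
    import django

    settings.configure(USE_I18N=False, USE_TZ=False)
    django.setup()

    widget = SplitDateTimeWidget()
    # One compressed value -> one value per subwidget.
    parts = widget.decompress(datetime.datetime(2020, 1, 2, 3, 4))
    assert parts == [datetime.date(2020, 1, 2), datetime.time(3, 4)]

    # Posted data uses the '<name>_<index>' suffixes built in __init__().
    data = {'when_0': '2020-01-02', 'when_1': '03:04'}
    assert widget.value_from_datadict(data, {}, 'when') == ['2020-01-02', '03:04']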
21169dfc68c96fc530425ab1af7a387ecd7b2e6a47110eddb3b83fd94d4192a6
import datetime import re from django.forms.utils import flatatt, pretty_name from django.forms.widgets import Textarea, TextInput from django.utils.functional import cached_property from django.utils.html import conditional_escape, format_html, html_safe from django.utils.safestring import mark_safe from django.utils.translation import gettext_lazy as _ __all__ = ('BoundField',) @html_safe class BoundField: "A Field plus data" def __init__(self, form, field, name): self.form = form self.field = field self.name = name self.html_name = form.add_prefix(name) self.html_initial_name = form.add_initial_prefix(name) self.html_initial_id = form.add_initial_prefix(self.auto_id) if self.field.label is None: self.label = pretty_name(name) else: self.label = self.field.label self.help_text = field.help_text or '' def __str__(self): """Render this field as an HTML widget.""" if self.field.show_hidden_initial: return self.as_widget() + self.as_hidden(only_initial=True) return self.as_widget() @cached_property def subwidgets(self): """ Most widgets yield a single subwidget, but others like RadioSelect and CheckboxSelectMultiple produce one subwidget for each choice. This property is cached so that only one database query occurs when rendering ModelChoiceFields. """ id_ = self.field.widget.attrs.get('id') or self.auto_id attrs = {'id': id_} if id_ else {} attrs = self.build_widget_attrs(attrs) return [ BoundWidget(self.field.widget, widget, self.form.renderer) for widget in self.field.widget.subwidgets(self.html_name, self.value(), attrs=attrs) ] def __bool__(self): # BoundField evaluates to True even if it doesn't have subwidgets. return True def __iter__(self): return iter(self.subwidgets) def __len__(self): return len(self.subwidgets) def __getitem__(self, idx): # Prevent unnecessary reevaluation when accessing BoundField's attrs # from templates. if not isinstance(idx, (int, slice)): raise TypeError( 'BoundField indices must be integers or slices, not %s.' % type(idx).__name__ ) return self.subwidgets[idx] @property def errors(self): """ Return an ErrorList (empty if there are no errors) for this field. """ return self.form.errors.get(self.name, self.form.error_class()) def as_widget(self, widget=None, attrs=None, only_initial=False): """ Render the field by rendering the passed widget, adding any HTML attributes passed as attrs. If a widget isn't specified, use the field's default widget. """ widget = widget or self.field.widget if self.field.localize: widget.is_localized = True attrs = attrs or {} attrs = self.build_widget_attrs(attrs, widget) if self.auto_id and 'id' not in widget.attrs: attrs.setdefault('id', self.html_initial_id if only_initial else self.auto_id) return widget.render( name=self.html_initial_name if only_initial else self.html_name, value=self.value(), attrs=attrs, renderer=self.form.renderer, ) def as_text(self, attrs=None, **kwargs): """ Return a string of HTML for representing this as an <input type="text">. """ return self.as_widget(TextInput(), attrs, **kwargs) def as_textarea(self, attrs=None, **kwargs): """Return a string of HTML for representing this as a <textarea>.""" return self.as_widget(Textarea(), attrs, **kwargs) def as_hidden(self, attrs=None, **kwargs): """ Return a string of HTML for representing this as an <input type="hidden">. """ return self.as_widget(self.field.hidden_widget(), attrs, **kwargs) @property def data(self): """ Return the data for this BoundField, or None if it wasn't given. 
""" return self.field.widget.value_from_datadict(self.form.data, self.form.files, self.html_name) def value(self): """ Return the value for this BoundField, using the initial value if the form is not bound or the data otherwise. """ data = self.initial if self.form.is_bound: data = self.field.bound_data(self.data, data) return self.field.prepare_value(data) def label_tag(self, contents=None, attrs=None, label_suffix=None): """ Wrap the given contents in a <label>, if the field has an ID attribute. contents should be mark_safe'd to avoid HTML escaping. If contents aren't given, use the field's HTML-escaped label. If attrs are given, use them as HTML attributes on the <label> tag. label_suffix overrides the form's label_suffix. """ contents = contents or self.label if label_suffix is None: label_suffix = (self.field.label_suffix if self.field.label_suffix is not None else self.form.label_suffix) # Only add the suffix if the label does not end in punctuation. # Translators: If found as last label character, these punctuation # characters will prevent the default label_suffix to be appended to the label if label_suffix and contents and contents[-1] not in _(':?.!'): contents = format_html('{}{}', contents, label_suffix) widget = self.field.widget id_ = widget.attrs.get('id') or self.auto_id if id_: id_for_label = widget.id_for_label(id_) if id_for_label: attrs = {**(attrs or {}), 'for': id_for_label} if self.field.required and hasattr(self.form, 'required_css_class'): attrs = attrs or {} if 'class' in attrs: attrs['class'] += ' ' + self.form.required_css_class else: attrs['class'] = self.form.required_css_class attrs = flatatt(attrs) if attrs else '' contents = format_html('<label{}>{}</label>', attrs, contents) else: contents = conditional_escape(contents) return mark_safe(contents) def css_classes(self, extra_classes=None): """ Return a string of space-separated CSS classes for this field. """ if hasattr(extra_classes, 'split'): extra_classes = extra_classes.split() extra_classes = set(extra_classes or []) if self.errors and hasattr(self.form, 'error_css_class'): extra_classes.add(self.form.error_css_class) if self.field.required and hasattr(self.form, 'required_css_class'): extra_classes.add(self.form.required_css_class) return ' '.join(extra_classes) @property def is_hidden(self): """Return True if this BoundField's widget is hidden.""" return self.field.widget.is_hidden @property def auto_id(self): """ Calculate and return the ID attribute for this BoundField, if the associated Form has specified auto_id. Return an empty string otherwise. """ auto_id = self.form.auto_id # Boolean or string if auto_id and '%s' in str(auto_id): return auto_id % self.html_name elif auto_id: return self.html_name return '' @property def id_for_label(self): """ Wrapper around the field widget's `id_for_label` method. Useful, for example, for focusing on this field regardless of whether it has a single widget or a MultiWidget. """ widget = self.field.widget id_ = widget.attrs.get('id') or self.auto_id return widget.id_for_label(id_) @cached_property def initial(self): data = self.form.get_initial_for_field(self.field, self.name) # If this is an auto-generated default date, nix the microseconds for # standardized handling. See #22502. 
if (isinstance(data, (datetime.datetime, datetime.time)) and not self.field.widget.supports_microseconds): data = data.replace(microsecond=0) return data def build_widget_attrs(self, attrs, widget=None): widget = widget or self.field.widget attrs = dict(attrs) # Copy attrs to avoid modifying the argument. if widget.use_required_attribute(self.initial) and self.field.required and self.form.use_required_attribute: attrs['required'] = True if self.field.disabled: attrs['disabled'] = True return attrs @property def widget_type(self): return re.sub(r'widget$|input$', '', self.field.widget.__class__.__name__.lower()) @html_safe class BoundWidget: """ A container class used for iterating over widgets. This is useful for widgets that have choices. For example, the following can be used in a template: {% for radio in myform.beatles %} <label for="{{ radio.id_for_label }}"> {{ radio.choice_label }} <span class="radio">{{ radio.tag }}</span> </label> {% endfor %} """ def __init__(self, parent_widget, data, renderer): self.parent_widget = parent_widget self.data = data self.renderer = renderer def __str__(self): return self.tag(wrap_label=True) def tag(self, wrap_label=False): context = {'widget': {**self.data, 'wrap_label': wrap_label}} return self.parent_widget._render(self.template_name, context, self.renderer) @property def template_name(self): if 'template_name' in self.data: return self.data['template_name'] return self.parent_widget.template_name @property def id_for_label(self): return 'id_%s_%s' % (self.data['name'], self.data['index']) @property def choice_label(self): return self.data['label']
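

# ---------------------------------------------------------------------------
# Illustrative usage sketch, not part of Django itself: how BoundField and
# BoundWidget are reached in practice. The form class and its field are
# hypothetical, and a minimally configured settings module is assumed.
if __name__ == '__main__':
    import django
    from django.conf import settings

    settings.configure(USE_I18N=False, USE_TZ=False)
    django.setup()

    from django import forms

    class BeatlesForm(forms.Form):
        beatles = forms.ChoiceField(
            choices=[('john', 'John'), ('paul', 'Paul')],
            widget=forms.RadioSelect,
        )

    bound = BeatlesForm()['beatles']   # Form.__getitem__ returns a BoundField.
    print(bound.label, bound.auto_id)  # Beatles id_beatles
    for radio in bound:                # Iteration yields BoundWidget objects.
        print(radio.id_for_label, radio.choice_label)
        # id_beatles_0 John / id_beatles_1 Paul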
cb292b9861d24166ba4bc3b62620c8a8799048e3b1fb1e9989b09be4620e3f44
""" Field classes. """ import copy import datetime import json import math import operator import os import re import uuid from decimal import Decimal, DecimalException from io import BytesIO from urllib.parse import urlsplit, urlunsplit from django.core import validators from django.core.exceptions import ValidationError from django.forms.boundfield import BoundField from django.forms.utils import from_current_timezone, to_current_timezone from django.forms.widgets import ( FILE_INPUT_CONTRADICTION, CheckboxInput, ClearableFileInput, DateInput, DateTimeInput, EmailInput, FileInput, HiddenInput, MultipleHiddenInput, NullBooleanSelect, NumberInput, Select, SelectMultiple, SplitDateTimeWidget, SplitHiddenDateTimeWidget, Textarea, TextInput, TimeInput, URLInput, ) from django.utils import formats from django.utils.dateparse import parse_datetime, parse_duration from django.utils.duration import duration_string from django.utils.ipv6 import clean_ipv6_address from django.utils.regex_helper import _lazy_re_compile from django.utils.translation import gettext_lazy as _, ngettext_lazy __all__ = ( 'Field', 'CharField', 'IntegerField', 'DateField', 'TimeField', 'DateTimeField', 'DurationField', 'RegexField', 'EmailField', 'FileField', 'ImageField', 'URLField', 'BooleanField', 'NullBooleanField', 'ChoiceField', 'MultipleChoiceField', 'ComboField', 'MultiValueField', 'FloatField', 'DecimalField', 'SplitDateTimeField', 'GenericIPAddressField', 'FilePathField', 'JSONField', 'SlugField', 'TypedChoiceField', 'TypedMultipleChoiceField', 'UUIDField', ) class Field: widget = TextInput # Default widget to use when rendering this type of Field. hidden_widget = HiddenInput # Default widget to use when rendering this as "hidden". default_validators = [] # Default set of validators # Add an 'invalid' entry to default_error_message if you want a specific # field error message not raised by the field validators. default_error_messages = { 'required': _('This field is required.'), } empty_values = list(validators.EMPTY_VALUES) def __init__(self, *, required=True, widget=None, label=None, initial=None, help_text='', error_messages=None, show_hidden_initial=False, validators=(), localize=False, disabled=False, label_suffix=None): # required -- Boolean that specifies whether the field is required. # True by default. # widget -- A Widget class, or instance of a Widget class, that should # be used for this Field when displaying it. Each Field has a # default Widget that it'll use if you don't specify this. In # most cases, the default widget is TextInput. # label -- A verbose name for this field, for use in displaying this # field in a form. By default, Django will use a "pretty" # version of the form field name, if the Field is part of a # Form. # initial -- A value to use in this Field's initial display. This value # is *not* used as a fallback if data isn't given. # help_text -- An optional string to use as "help text" for this Field. # error_messages -- An optional dictionary to override the default # messages that the field will raise. # show_hidden_initial -- Boolean that specifies if it is needed to render a # hidden widget with initial value after widget. # validators -- List of additional validators to use # localize -- Boolean that specifies if the field should be localized. # disabled -- Boolean that specifies whether the field is disabled, that # is its widget is shown in the form but not editable. # label_suffix -- Suffix to be added to the label. Overrides # form's label_suffix. 
self.required, self.label, self.initial = required, label, initial self.show_hidden_initial = show_hidden_initial self.help_text = help_text self.disabled = disabled self.label_suffix = label_suffix widget = widget or self.widget if isinstance(widget, type): widget = widget() else: widget = copy.deepcopy(widget) # Trigger the localization machinery if needed. self.localize = localize if self.localize: widget.is_localized = True # Let the widget know whether it should display as required. widget.is_required = self.required # Hook into self.widget_attrs() for any Field-specific HTML attributes. extra_attrs = self.widget_attrs(widget) if extra_attrs: widget.attrs.update(extra_attrs) self.widget = widget messages = {} for c in reversed(self.__class__.__mro__): messages.update(getattr(c, 'default_error_messages', {})) messages.update(error_messages or {}) self.error_messages = messages self.validators = [*self.default_validators, *validators] super().__init__() def prepare_value(self, value): return value def to_python(self, value): return value def validate(self, value): if value in self.empty_values and self.required: raise ValidationError(self.error_messages['required'], code='required') def run_validators(self, value): if value in self.empty_values: return errors = [] for v in self.validators: try: v(value) except ValidationError as e: if hasattr(e, 'code') and e.code in self.error_messages: e.message = self.error_messages[e.code] errors.extend(e.error_list) if errors: raise ValidationError(errors) def clean(self, value): """ Validate the given value and return its "cleaned" value as an appropriate Python object. Raise ValidationError for any errors. """ value = self.to_python(value) self.validate(value) self.run_validators(value) return value def bound_data(self, data, initial): """ Return the value that should be shown for this field on render of a bound form, given the submitted POST data for the field and the initial data, if any. For most fields, this will simply be data; FileFields need to handle it a bit differently. """ if self.disabled: return initial return data def widget_attrs(self, widget): """ Given a Widget instance (*not* a Widget class), return a dictionary of any HTML attributes that should be added to the Widget, based on this Field. """ return {} def has_changed(self, initial, data): """Return True if data differs from initial.""" # Always return False if the field is disabled since self.bound_data # always uses the initial value in this case. if self.disabled: return False try: data = self.to_python(data) if hasattr(self, '_coerce'): return self._coerce(data) != self._coerce(initial) except ValidationError: return True # For purposes of seeing whether something has changed, None is # the same as an empty string, if the data or initial value we get # is None, replace it with ''. initial_value = initial if initial is not None else '' data_value = data if data is not None else '' return initial_value != data_value def get_bound_field(self, form, field_name): """ Return a BoundField instance that will be used when accessing the form field in a template. 
""" return BoundField(form, self, field_name) def __deepcopy__(self, memo): result = copy.copy(self) memo[id(self)] = result result.widget = copy.deepcopy(self.widget, memo) result.error_messages = self.error_messages.copy() result.validators = self.validators[:] return result class CharField(Field): def __init__(self, *, max_length=None, min_length=None, strip=True, empty_value='', **kwargs): self.max_length = max_length self.min_length = min_length self.strip = strip self.empty_value = empty_value super().__init__(**kwargs) if min_length is not None: self.validators.append(validators.MinLengthValidator(int(min_length))) if max_length is not None: self.validators.append(validators.MaxLengthValidator(int(max_length))) self.validators.append(validators.ProhibitNullCharactersValidator()) def to_python(self, value): """Return a string.""" if value not in self.empty_values: value = str(value) if self.strip: value = value.strip() if value in self.empty_values: return self.empty_value return value def widget_attrs(self, widget): attrs = super().widget_attrs(widget) if self.max_length is not None and not widget.is_hidden: # The HTML attribute is maxlength, not max_length. attrs['maxlength'] = str(self.max_length) if self.min_length is not None and not widget.is_hidden: # The HTML attribute is minlength, not min_length. attrs['minlength'] = str(self.min_length) return attrs class IntegerField(Field): widget = NumberInput default_error_messages = { 'invalid': _('Enter a whole number.'), } re_decimal = _lazy_re_compile(r'\.0*\s*$') def __init__(self, *, max_value=None, min_value=None, **kwargs): self.max_value, self.min_value = max_value, min_value if kwargs.get('localize') and self.widget == NumberInput: # Localized number input is not well supported on most browsers kwargs.setdefault('widget', super().widget) super().__init__(**kwargs) if max_value is not None: self.validators.append(validators.MaxValueValidator(max_value)) if min_value is not None: self.validators.append(validators.MinValueValidator(min_value)) def to_python(self, value): """ Validate that int() can be called on the input. Return the result of int() or None for empty values. """ value = super().to_python(value) if value in self.empty_values: return None if self.localize: value = formats.sanitize_separators(value) # Strip trailing decimal and zeros. try: value = int(self.re_decimal.sub('', str(value))) except (ValueError, TypeError): raise ValidationError(self.error_messages['invalid'], code='invalid') return value def widget_attrs(self, widget): attrs = super().widget_attrs(widget) if isinstance(widget, NumberInput): if self.min_value is not None: attrs['min'] = self.min_value if self.max_value is not None: attrs['max'] = self.max_value return attrs class FloatField(IntegerField): default_error_messages = { 'invalid': _('Enter a number.'), } def to_python(self, value): """ Validate that float() can be called on the input. Return the result of float() or None for empty values. 
""" value = super(IntegerField, self).to_python(value) if value in self.empty_values: return None if self.localize: value = formats.sanitize_separators(value) try: value = float(value) except (ValueError, TypeError): raise ValidationError(self.error_messages['invalid'], code='invalid') return value def validate(self, value): super().validate(value) if value in self.empty_values: return if not math.isfinite(value): raise ValidationError(self.error_messages['invalid'], code='invalid') def widget_attrs(self, widget): attrs = super().widget_attrs(widget) if isinstance(widget, NumberInput) and 'step' not in widget.attrs: attrs.setdefault('step', 'any') return attrs class DecimalField(IntegerField): default_error_messages = { 'invalid': _('Enter a number.'), } def __init__(self, *, max_value=None, min_value=None, max_digits=None, decimal_places=None, **kwargs): self.max_digits, self.decimal_places = max_digits, decimal_places super().__init__(max_value=max_value, min_value=min_value, **kwargs) self.validators.append(validators.DecimalValidator(max_digits, decimal_places)) def to_python(self, value): """ Validate that the input is a decimal number. Return a Decimal instance or None for empty values. Ensure that there are no more than max_digits in the number and no more than decimal_places digits after the decimal point. """ if value in self.empty_values: return None if self.localize: value = formats.sanitize_separators(value) value = str(value).strip() try: value = Decimal(value) except DecimalException: raise ValidationError(self.error_messages['invalid'], code='invalid') return value def validate(self, value): super().validate(value) if value in self.empty_values: return if not value.is_finite(): raise ValidationError(self.error_messages['invalid'], code='invalid') def widget_attrs(self, widget): attrs = super().widget_attrs(widget) if isinstance(widget, NumberInput) and 'step' not in widget.attrs: if self.decimal_places is not None: # Use exponential notation for small values since they might # be parsed as 0 otherwise. ref #20765 step = str(Decimal(1).scaleb(-self.decimal_places)).lower() else: step = 'any' attrs.setdefault('step', step) return attrs class BaseTemporalField(Field): def __init__(self, *, input_formats=None, **kwargs): super().__init__(**kwargs) if input_formats is not None: self.input_formats = input_formats def to_python(self, value): value = value.strip() # Try to strptime against each input format. for format in self.input_formats: try: return self.strptime(value, format) except (ValueError, TypeError): continue raise ValidationError(self.error_messages['invalid'], code='invalid') def strptime(self, value, format): raise NotImplementedError('Subclasses must define this method.') class DateField(BaseTemporalField): widget = DateInput input_formats = formats.get_format_lazy('DATE_INPUT_FORMATS') default_error_messages = { 'invalid': _('Enter a valid date.'), } def to_python(self, value): """ Validate that the input can be converted to a date. Return a Python datetime.date object. 
""" if value in self.empty_values: return None if isinstance(value, datetime.datetime): return value.date() if isinstance(value, datetime.date): return value return super().to_python(value) def strptime(self, value, format): return datetime.datetime.strptime(value, format).date() class TimeField(BaseTemporalField): widget = TimeInput input_formats = formats.get_format_lazy('TIME_INPUT_FORMATS') default_error_messages = { 'invalid': _('Enter a valid time.') } def to_python(self, value): """ Validate that the input can be converted to a time. Return a Python datetime.time object. """ if value in self.empty_values: return None if isinstance(value, datetime.time): return value return super().to_python(value) def strptime(self, value, format): return datetime.datetime.strptime(value, format).time() class DateTimeFormatsIterator: def __iter__(self): yield from formats.get_format('DATETIME_INPUT_FORMATS') yield from formats.get_format('DATE_INPUT_FORMATS') class DateTimeField(BaseTemporalField): widget = DateTimeInput input_formats = DateTimeFormatsIterator() default_error_messages = { 'invalid': _('Enter a valid date/time.'), } def prepare_value(self, value): if isinstance(value, datetime.datetime): value = to_current_timezone(value) return value def to_python(self, value): """ Validate that the input can be converted to a datetime. Return a Python datetime.datetime object. """ if value in self.empty_values: return None if isinstance(value, datetime.datetime): return from_current_timezone(value) if isinstance(value, datetime.date): result = datetime.datetime(value.year, value.month, value.day) return from_current_timezone(result) try: result = parse_datetime(value.strip()) except ValueError: raise ValidationError(self.error_messages['invalid'], code='invalid') if not result: result = super().to_python(value) return from_current_timezone(result) def strptime(self, value, format): return datetime.datetime.strptime(value, format) class DurationField(Field): default_error_messages = { 'invalid': _('Enter a valid duration.'), 'overflow': _('The number of days must be between {min_days} and {max_days}.') } def prepare_value(self, value): if isinstance(value, datetime.timedelta): return duration_string(value) return value def to_python(self, value): if value in self.empty_values: return None if isinstance(value, datetime.timedelta): return value try: value = parse_duration(str(value)) except OverflowError: raise ValidationError(self.error_messages['overflow'].format( min_days=datetime.timedelta.min.days, max_days=datetime.timedelta.max.days, ), code='overflow') if value is None: raise ValidationError(self.error_messages['invalid'], code='invalid') return value class RegexField(CharField): def __init__(self, regex, **kwargs): """ regex can be either a string or a compiled regular expression object. 
""" kwargs.setdefault('strip', False) super().__init__(**kwargs) self._set_regex(regex) def _get_regex(self): return self._regex def _set_regex(self, regex): if isinstance(regex, str): regex = re.compile(regex) self._regex = regex if hasattr(self, '_regex_validator') and self._regex_validator in self.validators: self.validators.remove(self._regex_validator) self._regex_validator = validators.RegexValidator(regex=regex) self.validators.append(self._regex_validator) regex = property(_get_regex, _set_regex) class EmailField(CharField): widget = EmailInput default_validators = [validators.validate_email] def __init__(self, **kwargs): super().__init__(strip=True, **kwargs) class FileField(Field): widget = ClearableFileInput default_error_messages = { 'invalid': _("No file was submitted. Check the encoding type on the form."), 'missing': _("No file was submitted."), 'empty': _("The submitted file is empty."), 'max_length': ngettext_lazy( 'Ensure this filename has at most %(max)d character (it has %(length)d).', 'Ensure this filename has at most %(max)d characters (it has %(length)d).', 'max'), 'contradiction': _('Please either submit a file or check the clear checkbox, not both.') } def __init__(self, *, max_length=None, allow_empty_file=False, **kwargs): self.max_length = max_length self.allow_empty_file = allow_empty_file super().__init__(**kwargs) def to_python(self, data): if data in self.empty_values: return None # UploadedFile objects should have name and size attributes. try: file_name = data.name file_size = data.size except AttributeError: raise ValidationError(self.error_messages['invalid'], code='invalid') if self.max_length is not None and len(file_name) > self.max_length: params = {'max': self.max_length, 'length': len(file_name)} raise ValidationError(self.error_messages['max_length'], code='max_length', params=params) if not file_name: raise ValidationError(self.error_messages['invalid'], code='invalid') if not self.allow_empty_file and not file_size: raise ValidationError(self.error_messages['empty'], code='empty') return data def clean(self, data, initial=None): # If the widget got contradictory inputs, we raise a validation error if data is FILE_INPUT_CONTRADICTION: raise ValidationError(self.error_messages['contradiction'], code='contradiction') # False means the field value should be cleared; further validation is # not needed. if data is False: if not self.required: return False # If the field is required, clearing is not possible (the widget # shouldn't return False data in that case anyway). False is not # in self.empty_value; if a False value makes it this far # it should be validated from here on out as None (so it will be # caught by the required check). data = None if not data and initial: return initial return super().clean(data) def bound_data(self, data, initial): if data in (None, FILE_INPUT_CONTRADICTION): return initial return data def has_changed(self, initial, data): return not self.disabled and data is not None class ImageField(FileField): default_validators = [validators.validate_image_file_extension] default_error_messages = { 'invalid_image': _( "Upload a valid image. The file you uploaded was either not an " "image or a corrupted image." ), } def to_python(self, data): """ Check that the file-upload field data contains a valid image (GIF, JPG, PNG, etc. -- whatever Pillow supports). """ f = super().to_python(data) if f is None: return None from PIL import Image # We need to get a file object for Pillow. 
We might have a path or we might # have to read the data into memory. if hasattr(data, 'temporary_file_path'): file = data.temporary_file_path() else: if hasattr(data, 'read'): file = BytesIO(data.read()) else: file = BytesIO(data['content']) try: # load() could spot a truncated JPEG, but it loads the entire # image in memory, which is a DoS vector. See #3848 and #18520. image = Image.open(file) # verify() must be called immediately after the constructor. image.verify() # Annotating so subclasses can reuse it for their own validation f.image = image # Pillow doesn't detect the MIME type of all formats. In those # cases, content_type will be None. f.content_type = Image.MIME.get(image.format) except Exception as exc: # Pillow doesn't recognize it as an image. raise ValidationError( self.error_messages['invalid_image'], code='invalid_image', ) from exc if hasattr(f, 'seek') and callable(f.seek): f.seek(0) return f def widget_attrs(self, widget): attrs = super().widget_attrs(widget) if isinstance(widget, FileInput) and 'accept' not in widget.attrs: attrs.setdefault('accept', 'image/*') return attrs class URLField(CharField): widget = URLInput default_error_messages = { 'invalid': _('Enter a valid URL.'), } default_validators = [validators.URLValidator()] def __init__(self, **kwargs): super().__init__(strip=True, **kwargs) def to_python(self, value): def split_url(url): """ Return a list of url parts via urlparse.urlsplit(), or raise ValidationError for some malformed URLs. """ try: return list(urlsplit(url)) except ValueError: # urlparse.urlsplit can raise a ValueError with some # misformatted URLs. raise ValidationError(self.error_messages['invalid'], code='invalid') value = super().to_python(value) if value: url_fields = split_url(value) if not url_fields[0]: # If no URL scheme given, assume http:// url_fields[0] = 'http' if not url_fields[1]: # Assume that if no domain is provided, that the path segment # contains the domain. url_fields[1] = url_fields[2] url_fields[2] = '' # Rebuild the url_fields list, since the domain segment may now # contain the path too. url_fields = split_url(urlunsplit(url_fields)) value = urlunsplit(url_fields) return value class BooleanField(Field): widget = CheckboxInput def to_python(self, value): """Return a Python boolean object.""" # Explicitly check for the string 'False', which is what a hidden field # will submit for False. Also check for '0', since this is what # RadioSelect will provide. Because bool("True") == bool('1') == True, # we don't need to handle that explicitly. if isinstance(value, str) and value.lower() in ('false', '0'): value = False else: value = bool(value) return super().to_python(value) def validate(self, value): if not value and self.required: raise ValidationError(self.error_messages['required'], code='required') def has_changed(self, initial, data): if self.disabled: return False # Sometimes data or initial may be a string equivalent of a boolean # so we should run it through to_python first to get a boolean value return self.to_python(initial) != self.to_python(data) class NullBooleanField(BooleanField): """ A field whose valid values are None, True, and False. Clean invalid values to None. """ widget = NullBooleanSelect def to_python(self, value): """ Explicitly check for the string 'True' and 'False', which is what a hidden field will submit for True and False, for 'true' and 'false', which are likely to be returned by JavaScript serializations of forms, and for '1' and '0', which is what a RadioField will submit. 
Unlike the BooleanField, this field must check for True because it
        doesn't use the bool() function.
        """
        if value in (True, 'True', 'true', '1'):
            return True
        elif value in (False, 'False', 'false', '0'):
            return False
        else:
            return None

    def validate(self, value):
        pass


class CallableChoiceIterator:
    def __init__(self, choices_func):
        self.choices_func = choices_func

    def __iter__(self):
        yield from self.choices_func()


class ChoiceField(Field):
    widget = Select
    default_error_messages = {
        'invalid_choice': _('Select a valid choice. %(value)s is not one of the available choices.'),
    }

    def __init__(self, *, choices=(), **kwargs):
        super().__init__(**kwargs)
        self.choices = choices

    def __deepcopy__(self, memo):
        result = super().__deepcopy__(memo)
        result._choices = copy.deepcopy(self._choices, memo)
        return result

    def _get_choices(self):
        return self._choices

    def _set_choices(self, value):
        # Setting choices also sets the choices on the widget.
        # choices can be any iterable, but we call list() on it because
        # it will be consumed more than once.
        if callable(value):
            value = CallableChoiceIterator(value)
        else:
            value = list(value)
        self._choices = self.widget.choices = value

    choices = property(_get_choices, _set_choices)

    def to_python(self, value):
        """Return a string."""
        if value in self.empty_values:
            return ''
        return str(value)

    def validate(self, value):
        """Validate that the input is in self.choices."""
        super().validate(value)
        if value and not self.valid_value(value):
            raise ValidationError(
                self.error_messages['invalid_choice'],
                code='invalid_choice',
                params={'value': value},
            )

    def valid_value(self, value):
        """Check to see if the provided value is a valid choice."""
        text_value = str(value)
        for k, v in self.choices:
            if isinstance(v, (list, tuple)):
                # This is an optgroup, so look inside the group for options
                for k2, v2 in v:
                    if value == k2 or text_value == str(k2):
                        return True
            else:
                if value == k or text_value == str(k):
                    return True
        return False


class TypedChoiceField(ChoiceField):
    def __init__(self, *, coerce=lambda val: val, empty_value='', **kwargs):
        self.coerce = coerce
        self.empty_value = empty_value
        super().__init__(**kwargs)

    def _coerce(self, value):
        """
        Validate that the value can be coerced to the right type (if not empty).
        """
        if value == self.empty_value or value in self.empty_values:
            return self.empty_value
        try:
            value = self.coerce(value)
        except (ValueError, TypeError, ValidationError):
            raise ValidationError(
                self.error_messages['invalid_choice'],
                code='invalid_choice',
                params={'value': value},
            )
        return value

    def clean(self, value):
        value = super().clean(value)
        return self._coerce(value)


class MultipleChoiceField(ChoiceField):
    hidden_widget = MultipleHiddenInput
    widget = SelectMultiple
    default_error_messages = {
        'invalid_choice': _('Select a valid choice. %(value)s is not one of the available choices.'),
        'invalid_list': _('Enter a list of values.'),
    }

    def to_python(self, value):
        if not value:
            return []
        elif not isinstance(value, (list, tuple)):
            raise ValidationError(self.error_messages['invalid_list'], code='invalid_list')
        return [str(val) for val in value]

    def validate(self, value):
        """Validate that the input is a list or tuple."""
        if self.required and not value:
            raise ValidationError(self.error_messages['required'], code='required')
        # Validate that each value in the value list is in self.choices.
for val in value: if not self.valid_value(val): raise ValidationError( self.error_messages['invalid_choice'], code='invalid_choice', params={'value': val}, ) def has_changed(self, initial, data): if self.disabled: return False if initial is None: initial = [] if data is None: data = [] if len(initial) != len(data): return True initial_set = {str(value) for value in initial} data_set = {str(value) for value in data} return data_set != initial_set class TypedMultipleChoiceField(MultipleChoiceField): def __init__(self, *, coerce=lambda val: val, **kwargs): self.coerce = coerce self.empty_value = kwargs.pop('empty_value', []) super().__init__(**kwargs) def _coerce(self, value): """ Validate that the values are in self.choices and can be coerced to the right type. """ if value == self.empty_value or value in self.empty_values: return self.empty_value new_value = [] for choice in value: try: new_value.append(self.coerce(choice)) except (ValueError, TypeError, ValidationError): raise ValidationError( self.error_messages['invalid_choice'], code='invalid_choice', params={'value': choice}, ) return new_value def clean(self, value): value = super().clean(value) return self._coerce(value) def validate(self, value): if value != self.empty_value: super().validate(value) elif self.required: raise ValidationError(self.error_messages['required'], code='required') class ComboField(Field): """ A Field whose clean() method calls multiple Field clean() methods. """ def __init__(self, fields, **kwargs): super().__init__(**kwargs) # Set 'required' to False on the individual fields, because the # required validation will be handled by ComboField, not by those # individual fields. for f in fields: f.required = False self.fields = fields def clean(self, value): """ Validate the given value against all of self.fields, which is a list of Field instances. """ super().clean(value) for field in self.fields: value = field.clean(value) return value class MultiValueField(Field): """ Aggregate the logic of multiple Fields. Its clean() method takes a "decompressed" list of values, which are then cleaned into a single value according to self.fields. Each value in this list is cleaned by the corresponding field -- the first value is cleaned by the first field, the second value is cleaned by the second field, etc. Once all fields are cleaned, the list of clean values is "compressed" into a single value. Subclasses should not have to implement clean(). Instead, they must implement compress(), which takes a list of valid values and returns a "compressed" version of those values -- a single value. You'll probably want to use this with MultiWidget. """ default_error_messages = { 'invalid': _('Enter a list of values.'), 'incomplete': _('Enter a complete value.'), } def __init__(self, fields, *, require_all_fields=True, **kwargs): self.require_all_fields = require_all_fields super().__init__(**kwargs) for f in fields: f.error_messages.setdefault('incomplete', self.error_messages['incomplete']) if self.disabled: f.disabled = True if self.require_all_fields: # Set 'required' to False on the individual fields, because the # required validation will be handled by MultiValueField, not # by those individual fields. f.required = False self.fields = fields def __deepcopy__(self, memo): result = super().__deepcopy__(memo) result.fields = tuple(x.__deepcopy__(memo) for x in self.fields) return result def validate(self, value): pass def clean(self, value): """ Validate every value in the given list. 
A value is validated against the corresponding Field in self.fields. For example, if this MultiValueField was instantiated with fields=(DateField(), TimeField()), clean() would call DateField.clean(value[0]) and TimeField.clean(value[1]). """ clean_data = [] errors = [] if self.disabled and not isinstance(value, list): value = self.widget.decompress(value) if not value or isinstance(value, (list, tuple)): if not value or not [v for v in value if v not in self.empty_values]: if self.required: raise ValidationError(self.error_messages['required'], code='required') else: return self.compress([]) else: raise ValidationError(self.error_messages['invalid'], code='invalid') for i, field in enumerate(self.fields): try: field_value = value[i] except IndexError: field_value = None if field_value in self.empty_values: if self.require_all_fields: # Raise a 'required' error if the MultiValueField is # required and any field is empty. if self.required: raise ValidationError(self.error_messages['required'], code='required') elif field.required: # Otherwise, add an 'incomplete' error to the list of # collected errors and skip field cleaning, if a required # field is empty. if field.error_messages['incomplete'] not in errors: errors.append(field.error_messages['incomplete']) continue try: clean_data.append(field.clean(field_value)) except ValidationError as e: # Collect all validation errors in a single list, which we'll # raise at the end of clean(), rather than raising a single # exception for the first error we encounter. Skip duplicates. errors.extend(m for m in e.error_list if m not in errors) if errors: raise ValidationError(errors) out = self.compress(clean_data) self.validate(out) self.run_validators(out) return out def compress(self, data_list): """ Return a single value for the given list of values. The values can be assumed to be valid. For example, if this MultiValueField was instantiated with fields=(DateField(), TimeField()), this might return a datetime object created by combining the date and time in data_list. 
""" raise NotImplementedError('Subclasses must implement this method.') def has_changed(self, initial, data): if self.disabled: return False if initial is None: initial = ['' for x in range(0, len(data))] else: if not isinstance(initial, list): initial = self.widget.decompress(initial) for field, initial, data in zip(self.fields, initial, data): try: initial = field.to_python(initial) except ValidationError: return True if field.has_changed(initial, data): return True return False class FilePathField(ChoiceField): def __init__(self, path, *, match=None, recursive=False, allow_files=True, allow_folders=False, **kwargs): self.path, self.match, self.recursive = path, match, recursive self.allow_files, self.allow_folders = allow_files, allow_folders super().__init__(choices=(), **kwargs) if self.required: self.choices = [] else: self.choices = [("", "---------")] if self.match is not None: self.match_re = re.compile(self.match) if recursive: for root, dirs, files in sorted(os.walk(self.path)): if self.allow_files: for f in sorted(files): if self.match is None or self.match_re.search(f): f = os.path.join(root, f) self.choices.append((f, f.replace(path, "", 1))) if self.allow_folders: for f in sorted(dirs): if f == '__pycache__': continue if self.match is None or self.match_re.search(f): f = os.path.join(root, f) self.choices.append((f, f.replace(path, "", 1))) else: choices = [] for f in os.scandir(self.path): if f.name == '__pycache__': continue if (((self.allow_files and f.is_file()) or (self.allow_folders and f.is_dir())) and (self.match is None or self.match_re.search(f.name))): choices.append((f.path, f.name)) choices.sort(key=operator.itemgetter(1)) self.choices.extend(choices) self.widget.choices = self.choices class SplitDateTimeField(MultiValueField): widget = SplitDateTimeWidget hidden_widget = SplitHiddenDateTimeWidget default_error_messages = { 'invalid_date': _('Enter a valid date.'), 'invalid_time': _('Enter a valid time.'), } def __init__(self, *, input_date_formats=None, input_time_formats=None, **kwargs): errors = self.default_error_messages.copy() if 'error_messages' in kwargs: errors.update(kwargs['error_messages']) localize = kwargs.get('localize', False) fields = ( DateField(input_formats=input_date_formats, error_messages={'invalid': errors['invalid_date']}, localize=localize), TimeField(input_formats=input_time_formats, error_messages={'invalid': errors['invalid_time']}, localize=localize), ) super().__init__(fields, **kwargs) def compress(self, data_list): if data_list: # Raise a validation error if time or date is empty # (possible if SplitDateTimeField has required=False). 
if data_list[0] in self.empty_values: raise ValidationError(self.error_messages['invalid_date'], code='invalid_date') if data_list[1] in self.empty_values: raise ValidationError(self.error_messages['invalid_time'], code='invalid_time') result = datetime.datetime.combine(*data_list) return from_current_timezone(result) return None class GenericIPAddressField(CharField): def __init__(self, *, protocol='both', unpack_ipv4=False, **kwargs): self.unpack_ipv4 = unpack_ipv4 self.default_validators = validators.ip_address_validators(protocol, unpack_ipv4)[0] super().__init__(**kwargs) def to_python(self, value): if value in self.empty_values: return '' value = value.strip() if value and ':' in value: return clean_ipv6_address(value, self.unpack_ipv4) return value class SlugField(CharField): default_validators = [validators.validate_slug] def __init__(self, *, allow_unicode=False, **kwargs): self.allow_unicode = allow_unicode if self.allow_unicode: self.default_validators = [validators.validate_unicode_slug] super().__init__(**kwargs) class UUIDField(CharField): default_error_messages = { 'invalid': _('Enter a valid UUID.'), } def prepare_value(self, value): if isinstance(value, uuid.UUID): return str(value) return value def to_python(self, value): value = super().to_python(value) if value in self.empty_values: return None if not isinstance(value, uuid.UUID): try: value = uuid.UUID(value) except ValueError: raise ValidationError(self.error_messages['invalid'], code='invalid') return value class InvalidJSONInput(str): pass class JSONString(str): pass class JSONField(CharField): default_error_messages = { 'invalid': _('Enter a valid JSON.'), } widget = Textarea def __init__(self, encoder=None, decoder=None, **kwargs): self.encoder = encoder self.decoder = decoder super().__init__(**kwargs) def to_python(self, value): if self.disabled: return value if value in self.empty_values: return None elif isinstance(value, (list, dict, int, float, JSONString)): return value try: converted = json.loads(value, cls=self.decoder) except json.JSONDecodeError: raise ValidationError( self.error_messages['invalid'], code='invalid', params={'value': value}, ) if isinstance(converted, str): return JSONString(converted) else: return converted def bound_data(self, data, initial): if self.disabled: return initial try: return json.loads(data, cls=self.decoder) except json.JSONDecodeError: return InvalidJSONInput(data) def prepare_value(self, value): if isinstance(value, InvalidJSONInput): return value return json.dumps(value, cls=self.encoder) def has_changed(self, initial, data): if super().has_changed(initial, data): return True # For purposes of seeing whether something has changed, True isn't the # same as 1 and the order of keys doesn't matter. return ( json.dumps(initial, sort_keys=True, cls=self.encoder) != json.dumps(self.to_python(data), sort_keys=True, cls=self.encoder) )
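

# ---------------------------------------------------------------------------
# Illustrative usage sketch, not part of Django itself: a few clean() round
# trips through the fields defined above. The sample inputs are hypothetical,
# and a minimally configured settings module is assumed.
if __name__ == '__main__':
    import django
    from django.conf import settings

    settings.configure(USE_I18N=False, USE_L10N=False, USE_TZ=False)
    django.setup()

    # to_python() parses the input; validate()/run_validators() check it.
    assert DecimalField(max_digits=4, decimal_places=2).clean('3.14') == Decimal('3.14')

    # MultiValueField.clean() cleans each part, then compress() combines them.
    dt = SplitDateTimeField().clean(['2020-01-02', '03:04'])
    assert dt == datetime.datetime(2020, 1, 2, 3, 4)

    # JSONField decodes with json.loads() and preserves Python containers.
    assert JSONField().clean('{"a": 1}') == {'a': 1}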
e88447ac029a079faaf8010fdebfaedbfe11c391acb6199cc221374fd07ec277
import json from collections import UserList from django.conf import settings from django.core.exceptions import ValidationError from django.utils import timezone from django.utils.html import escape, format_html, format_html_join, html_safe from django.utils.translation import gettext_lazy as _ def pretty_name(name): """Convert 'first_name' to 'First name'.""" if not name: return '' return name.replace('_', ' ').capitalize() def flatatt(attrs): """ Convert a dictionary of attributes to a single string. The returned string will contain a leading space followed by key="value", XML-style pairs. In the case of a boolean value, the key will appear without a value. It is assumed that the keys do not need to be XML-escaped. If the passed dictionary is empty, then return an empty string. The result is passed through 'mark_safe' (by way of 'format_html_join'). """ key_value_attrs = [] boolean_attrs = [] for attr, value in attrs.items(): if isinstance(value, bool): if value: boolean_attrs.append((attr,)) elif value is not None: key_value_attrs.append((attr, value)) return ( format_html_join('', ' {}="{}"', sorted(key_value_attrs)) + format_html_join('', ' {}', sorted(boolean_attrs)) ) @html_safe class ErrorDict(dict): """ A collection of errors that knows how to display itself in various formats. The dictionary keys are the field names, and the values are the errors. """ def as_data(self): return {f: e.as_data() for f, e in self.items()} def get_json_data(self, escape_html=False): return {f: e.get_json_data(escape_html) for f, e in self.items()} def as_json(self, escape_html=False): return json.dumps(self.get_json_data(escape_html)) def as_ul(self): if not self: return '' return format_html( '<ul class="errorlist">{}</ul>', format_html_join('', '<li>{}{}</li>', self.items()) ) def as_text(self): output = [] for field, errors in self.items(): output.append('* %s' % field) output.append('\n'.join(' * %s' % e for e in errors)) return '\n'.join(output) def __str__(self): return self.as_ul() @html_safe class ErrorList(UserList, list): """ A collection of errors that knows how to display itself in various formats. """ def __init__(self, initlist=None, error_class=None): super().__init__(initlist) if error_class is None: self.error_class = 'errorlist' else: self.error_class = 'errorlist {}'.format(error_class) def as_data(self): return ValidationError(self.data).error_list def copy(self): copy = super().copy() copy.error_class = self.error_class return copy def get_json_data(self, escape_html=False): errors = [] for error in self.as_data(): message = next(iter(error)) errors.append({ 'message': escape(message) if escape_html else message, 'code': error.code or '', }) return errors def as_json(self, escape_html=False): return json.dumps(self.get_json_data(escape_html)) def as_ul(self): if not self.data: return '' return format_html( '<ul class="{}">{}</ul>', self.error_class, format_html_join('', '<li>{}</li>', ((e,) for e in self)) ) def as_text(self): return '\n'.join('* %s' % e for e in self) def __str__(self): return self.as_ul() def __repr__(self): return repr(list(self)) def __contains__(self, item): return item in list(self) def __eq__(self, other): return list(self) == other def __getitem__(self, i): error = self.data[i] if isinstance(error, ValidationError): return next(iter(error)) return error def __reduce_ex__(self, *args, **kwargs): # The `list` reduce function returns an iterator as the fourth element # that is normally used for repopulating. 
Since we only inherit from # `list` for `isinstance` backward compatibility (Refs #17413) we # nullify this iterator as it would otherwise result in duplicate # entries. (Refs #23594) info = super(UserList, self).__reduce_ex__(*args, **kwargs) return info[:3] + (None, None) # Utilities for time zone support in DateTimeField et al. def from_current_timezone(value): """ When time zone support is enabled, convert naive datetimes entered in the current time zone to aware datetimes. """ if settings.USE_TZ and value is not None and timezone.is_naive(value): current_timezone = timezone.get_current_timezone() try: return timezone.make_aware(value, current_timezone) except Exception as exc: raise ValidationError( _('%(datetime)s couldn’t be interpreted ' 'in time zone %(current_timezone)s; it ' 'may be ambiguous or it may not exist.'), code='ambiguous_timezone', params={'datetime': value, 'current_timezone': current_timezone} ) from exc return value def to_current_timezone(value): """ When time zone support is enabled, convert aware datetimes to naive datetimes in the current time zone for display. """ if settings.USE_TZ and value is not None and timezone.is_aware(value): return timezone.make_naive(value) return value
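
# Illustrative usage of the helpers above: a minimal sketch, not part of the
# module's public API (the sample values are hypothetical).
#
#     >>> pretty_name('first_name')
#     'First name'
#     >>> flatatt({'id': 'header', 'checked': True})
#     ' id="header" checked'
#     >>> ErrorList(['Enter a valid email address.']).as_text()
#     '* Enter a valid email address.'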
9ec52018ec6b9ce77b6a95e9bf4bc22bafe796c14309b816beb049b39a0d8c7a
""" Form classes """ import copy from django.core.exceptions import NON_FIELD_ERRORS, ValidationError from django.forms.fields import Field, FileField from django.forms.utils import ErrorDict, ErrorList from django.forms.widgets import Media, MediaDefiningClass from django.utils.datastructures import MultiValueDict from django.utils.functional import cached_property from django.utils.html import conditional_escape, html_safe from django.utils.safestring import mark_safe from django.utils.translation import gettext as _ from .renderers import get_default_renderer __all__ = ('BaseForm', 'Form') class DeclarativeFieldsMetaclass(MediaDefiningClass): """Collect Fields declared on the base classes.""" def __new__(mcs, name, bases, attrs): # Collect fields from current class. current_fields = [] for key, value in list(attrs.items()): if isinstance(value, Field): current_fields.append((key, value)) attrs.pop(key) attrs['declared_fields'] = dict(current_fields) new_class = super().__new__(mcs, name, bases, attrs) # Walk through the MRO. declared_fields = {} for base in reversed(new_class.__mro__): # Collect fields from base class. if hasattr(base, 'declared_fields'): declared_fields.update(base.declared_fields) # Field shadowing. for attr, value in base.__dict__.items(): if value is None and attr in declared_fields: declared_fields.pop(attr) new_class.base_fields = declared_fields new_class.declared_fields = declared_fields return new_class @html_safe class BaseForm: """ The main implementation of all the Form logic. Note that this class is different than Form. See the comments by the Form class for more info. Any improvements to the form API should be made to this class, not to the Form class. """ default_renderer = None field_order = None prefix = None use_required_attribute = True def __init__(self, data=None, files=None, auto_id='id_%s', prefix=None, initial=None, error_class=ErrorList, label_suffix=None, empty_permitted=False, field_order=None, use_required_attribute=None, renderer=None): self.is_bound = data is not None or files is not None self.data = MultiValueDict() if data is None else data self.files = MultiValueDict() if files is None else files self.auto_id = auto_id if prefix is not None: self.prefix = prefix self.initial = initial or {} self.error_class = error_class # Translators: This is the default suffix added to form field labels self.label_suffix = label_suffix if label_suffix is not None else _(':') self.empty_permitted = empty_permitted self._errors = None # Stores the errors after clean() has been called. # The base_fields class attribute is the *class-wide* definition of # fields. Because a particular *instance* of the class might want to # alter self.fields, we create self.fields here by copying base_fields. # Instances should always modify self.fields; they should not modify # self.base_fields. self.fields = copy.deepcopy(self.base_fields) self._bound_fields_cache = {} self.order_fields(self.field_order if field_order is None else field_order) if use_required_attribute is not None: self.use_required_attribute = use_required_attribute if self.empty_permitted and self.use_required_attribute: raise ValueError( 'The empty_permitted and use_required_attribute arguments may ' 'not both be True.' ) # Initialize form renderer. Use a global default if not specified # either as an argument or as self.default_renderer. 
if renderer is None: if self.default_renderer is None: renderer = get_default_renderer() else: renderer = self.default_renderer if isinstance(self.default_renderer, type): renderer = renderer() self.renderer = renderer def order_fields(self, field_order): """ Rearrange the fields according to field_order. field_order is a list of field names specifying the order. Append fields not included in the list in the default order for backward compatibility with subclasses not overriding field_order. If field_order is None, keep all fields in the order defined in the class. Ignore unknown fields in field_order to allow disabling fields in form subclasses without redefining ordering. """ if field_order is None: return fields = {} for key in field_order: try: fields[key] = self.fields.pop(key) except KeyError: # ignore unknown fields pass fields.update(self.fields) # add remaining fields in original order self.fields = fields def __str__(self): return self.as_table() def __repr__(self): if self._errors is None: is_valid = "Unknown" else: is_valid = self.is_bound and not self._errors return '<%(cls)s bound=%(bound)s, valid=%(valid)s, fields=(%(fields)s)>' % { 'cls': self.__class__.__name__, 'bound': self.is_bound, 'valid': is_valid, 'fields': ';'.join(self.fields), } def __iter__(self): for name in self.fields: yield self[name] def __getitem__(self, name): """Return a BoundField with the given name.""" try: field = self.fields[name] except KeyError: raise KeyError( "Key '%s' not found in '%s'. Choices are: %s." % ( name, self.__class__.__name__, ', '.join(sorted(self.fields)), ) ) if name not in self._bound_fields_cache: self._bound_fields_cache[name] = field.get_bound_field(self, name) return self._bound_fields_cache[name] @property def errors(self): """Return an ErrorDict for the data provided for the form.""" if self._errors is None: self.full_clean() return self._errors def is_valid(self): """Return True if the form has no errors, or False otherwise.""" return self.is_bound and not self.errors def add_prefix(self, field_name): """ Return the field name with a prefix appended, if this Form has a prefix set. Subclasses may wish to override. """ return '%s-%s' % (self.prefix, field_name) if self.prefix else field_name def add_initial_prefix(self, field_name): """Add an 'initial' prefix for checking dynamic initial values.""" return 'initial-%s' % self.add_prefix(field_name) def _html_output(self, normal_row, error_row, row_ender, help_text_html, errors_on_separate_row): "Output HTML. Used by as_table(), as_ul(), as_p()." # Errors that should be displayed above all fields. top_errors = self.non_field_errors().copy() output, hidden_fields = [], [] for name, field in self.fields.items(): html_class_attr = '' bf = self[name] bf_errors = self.error_class(bf.errors) if bf.is_hidden: if bf_errors: top_errors.extend( [_('(Hidden field %(name)s) %(error)s') % {'name': name, 'error': str(e)} for e in bf_errors]) hidden_fields.append(str(bf)) else: # Create a 'class="..."' attribute if the row should have any # CSS classes applied. 
css_classes = bf.css_classes() if css_classes: html_class_attr = ' class="%s"' % css_classes if errors_on_separate_row and bf_errors: output.append(error_row % str(bf_errors)) if bf.label: label = conditional_escape(bf.label) label = bf.label_tag(label) or '' else: label = '' if field.help_text: help_text = help_text_html % field.help_text else: help_text = '' output.append(normal_row % { 'errors': bf_errors, 'label': label, 'field': bf, 'help_text': help_text, 'html_class_attr': html_class_attr, 'css_classes': css_classes, 'field_name': bf.html_name, }) if top_errors: output.insert(0, error_row % top_errors) if hidden_fields: # Insert any hidden fields in the last row. str_hidden = ''.join(hidden_fields) if output: last_row = output[-1] # Chop off the trailing row_ender (e.g. '</td></tr>') and # insert the hidden fields. if not last_row.endswith(row_ender): # This can happen in the as_p() case (and possibly others # that users write): if there are only top errors, we may # not be able to conscript the last row for our purposes, # so insert a new, empty row. last_row = (normal_row % { 'errors': '', 'label': '', 'field': '', 'help_text': '', 'html_class_attr': html_class_attr, 'css_classes': '', 'field_name': '', }) output.append(last_row) output[-1] = last_row[:-len(row_ender)] + str_hidden + row_ender else: # If there aren't any rows in the output, just append the # hidden fields. output.append(str_hidden) return mark_safe('\n'.join(output)) def as_table(self): "Return this form rendered as HTML <tr>s -- excluding the <table></table>." return self._html_output( normal_row='<tr%(html_class_attr)s><th>%(label)s</th><td>%(errors)s%(field)s%(help_text)s</td></tr>', error_row='<tr><td colspan="2">%s</td></tr>', row_ender='</td></tr>', help_text_html='<br><span class="helptext">%s</span>', errors_on_separate_row=False, ) def as_ul(self): "Return this form rendered as HTML <li>s -- excluding the <ul></ul>." return self._html_output( normal_row='<li%(html_class_attr)s>%(errors)s%(label)s %(field)s%(help_text)s</li>', error_row='<li>%s</li>', row_ender='</li>', help_text_html=' <span class="helptext">%s</span>', errors_on_separate_row=False, ) def as_p(self): "Return this form rendered as HTML <p>s." return self._html_output( normal_row='<p%(html_class_attr)s>%(label)s %(field)s%(help_text)s</p>', error_row='%s', row_ender='</p>', help_text_html=' <span class="helptext">%s</span>', errors_on_separate_row=True, ) def non_field_errors(self): """ Return an ErrorList of errors that aren't associated with a particular field -- i.e., from Form.clean(). Return an empty ErrorList if there are none. """ return self.errors.get(NON_FIELD_ERRORS, self.error_class(error_class='nonfield')) def add_error(self, field, error): """ Update the content of `self._errors`. The `field` argument is the name of the field to which the errors should be added. If it's None, treat the errors as NON_FIELD_ERRORS. The `error` argument can be a single error, a list of errors, or a dictionary that maps field names to lists of errors. An "error" can be either a simple string or an instance of ValidationError with its message attribute set and a "list or dictionary" can be an actual `list` or `dict` or an instance of ValidationError with its `error_list` or `error_dict` attribute set. If `error` is a dictionary, the `field` argument *must* be None and errors will be added to the fields that correspond to the keys of the dictionary. 
""" if not isinstance(error, ValidationError): # Normalize to ValidationError and let its constructor # do the hard work of making sense of the input. error = ValidationError(error) if hasattr(error, 'error_dict'): if field is not None: raise TypeError( "The argument `field` must be `None` when the `error` " "argument contains errors for multiple fields." ) else: error = error.error_dict else: error = {field or NON_FIELD_ERRORS: error.error_list} for field, error_list in error.items(): if field not in self.errors: if field != NON_FIELD_ERRORS and field not in self.fields: raise ValueError( "'%s' has no field named '%s'." % (self.__class__.__name__, field)) if field == NON_FIELD_ERRORS: self._errors[field] = self.error_class(error_class='nonfield') else: self._errors[field] = self.error_class() self._errors[field].extend(error_list) if field in self.cleaned_data: del self.cleaned_data[field] def has_error(self, field, code=None): return field in self.errors and ( code is None or any(error.code == code for error in self.errors.as_data()[field]) ) def full_clean(self): """ Clean all of self.data and populate self._errors and self.cleaned_data. """ self._errors = ErrorDict() if not self.is_bound: # Stop further processing. return self.cleaned_data = {} # If the form is permitted to be empty, and none of the form data has # changed from the initial data, short circuit any validation. if self.empty_permitted and not self.has_changed(): return self._clean_fields() self._clean_form() self._post_clean() def _clean_fields(self): for name, field in self.fields.items(): # value_from_datadict() gets the data from the data dictionaries. # Each widget type knows how to retrieve its own data, because some # widgets split data over several HTML fields. if field.disabled: value = self.get_initial_for_field(field, name) else: value = field.widget.value_from_datadict(self.data, self.files, self.add_prefix(name)) try: if isinstance(field, FileField): initial = self.get_initial_for_field(field, name) value = field.clean(value, initial) else: value = field.clean(value) self.cleaned_data[name] = value if hasattr(self, 'clean_%s' % name): value = getattr(self, 'clean_%s' % name)() self.cleaned_data[name] = value except ValidationError as e: self.add_error(name, e) def _clean_form(self): try: cleaned_data = self.clean() except ValidationError as e: self.add_error(None, e) else: if cleaned_data is not None: self.cleaned_data = cleaned_data def _post_clean(self): """ An internal hook for performing additional cleaning after form cleaning is complete. Used for model validation in model forms. """ pass def clean(self): """ Hook for doing any extra form-wide cleaning after Field.clean() has been called on every field. Any ValidationError raised by this method will not be associated with a particular field; it will have a special-case association with the field named '__all__'. """ return self.cleaned_data def has_changed(self): """Return True if data differs from initial.""" return bool(self.changed_data) @cached_property def changed_data(self): data = [] for name, field in self.fields.items(): prefixed_name = self.add_prefix(name) data_value = field.widget.value_from_datadict(self.data, self.files, prefixed_name) if not field.show_hidden_initial: # Use the BoundField's initial as this is the value passed to # the widget. 
initial_value = self[name].initial else: initial_prefixed_name = self.add_initial_prefix(name) hidden_widget = field.hidden_widget() try: initial_value = field.to_python(hidden_widget.value_from_datadict( self.data, self.files, initial_prefixed_name)) except ValidationError: # Always assume data has changed if validation fails. data.append(name) continue if field.has_changed(initial_value, data_value): data.append(name) return data @property def media(self): """Return all media required to render the widgets on this form.""" media = Media() for field in self.fields.values(): media = media + field.widget.media return media def is_multipart(self): """ Return True if the form needs to be multipart-encoded, i.e. it has FileInput, or False otherwise. """ return any(field.widget.needs_multipart_form for field in self.fields.values()) def hidden_fields(self): """ Return a list of all the BoundField objects that are hidden fields. Useful for manual form layout in templates. """ return [field for field in self if field.is_hidden] def visible_fields(self): """ Return a list of BoundField objects that aren't hidden fields. The opposite of the hidden_fields() method. """ return [field for field in self if not field.is_hidden] def get_initial_for_field(self, field, field_name): """ Return initial data for field on form. Use initial data from the form or the field, in that order. Evaluate callable values. """ value = self.initial.get(field_name, field.initial) if callable(value): value = value() return value class Form(BaseForm, metaclass=DeclarativeFieldsMetaclass): "A collection of Fields, plus their associated data." # This is a separate class from BaseForm in order to abstract the way # self.fields is specified. This class (Form) is the one that does the # fancy metaclass stuff purely for the semantic sugar -- it allows one # to define a form using declarative syntax. # BaseForm itself has no way of designating self.fields.
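
# A minimal sketch of the declarative API defined above; the ContactForm class
# and its data are hypothetical and assume settings are configured:
#
#     >>> from django import forms
#     >>> class ContactForm(forms.Form):
#     ...     name = forms.CharField(max_length=100)
#     ...     email = forms.EmailField()
#     >>> f = ContactForm({'name': 'Jo', 'email': 'not-an-email'})
#     >>> f.is_bound, f.is_valid()
#     (True, False)
#     >>> f.errors['email']
#     ['Enter a valid email address.']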
d959ba6acdc4fc01204c674458b35ca2d99fe3a710dd61d75f08fadf33dc9aa5
""" Functions for creating and restoring url-safe signed JSON objects. The format used looks like this: >>> signing.dumps("hello") 'ImhlbGxvIg:1QaUZC:YIye-ze3TTx7gtSv422nZA4sgmk' There are two components here, separated by a ':'. The first component is a URLsafe base64 encoded JSON of the object passed to dumps(). The second component is a base64 encoded hmac/SHA1 hash of "$first_component:$secret" signing.loads(s) checks the signature and returns the deserialized object. If the signature fails, a BadSignature exception is raised. >>> signing.loads("ImhlbGxvIg:1QaUZC:YIye-ze3TTx7gtSv422nZA4sgmk") 'hello' >>> signing.loads("ImhlbGxvIg:1QaUZC:YIye-ze3TTx7gtSv422nZA4sgmk-modified") ... BadSignature: Signature failed: ImhlbGxvIg:1QaUZC:YIye-ze3TTx7gtSv422nZA4sgmk-modified You can optionally compress the JSON prior to base64 encoding it to save space, using the compress=True argument. This checks if compression actually helps and only applies compression if the result is a shorter string: >>> signing.dumps(list(range(1, 20)), compress=True) '.eJwFwcERACAIwLCF-rCiILN47r-GyZVJsNgkxaFxoDgxcOHGxMKD_T7vhAml:1QaUaL:BA0thEZrp4FQVXIXuOvYJtLJSrQ' The fact that the string is compressed is signalled by the prefixed '.' at the start of the base64 JSON. There are 65 url-safe characters: the 64 used by url-safe base64 and the ':'. These functions make use of all of them. """ import base64 import datetime import json import time import zlib from django.conf import settings from django.utils import baseconv from django.utils.crypto import constant_time_compare, salted_hmac from django.utils.encoding import force_bytes from django.utils.module_loading import import_string from django.utils.regex_helper import _lazy_re_compile _SEP_UNSAFE = _lazy_re_compile(r'^[A-z0-9-_=]*$') class BadSignature(Exception): """Signature does not match.""" pass class SignatureExpired(BadSignature): """Signature timestamp is older than required max_age.""" pass def b64_encode(s): return base64.urlsafe_b64encode(s).strip(b'=') def b64_decode(s): pad = b'=' * (-len(s) % 4) return base64.urlsafe_b64decode(s + pad) def base64_hmac(salt, value, key, algorithm='sha1'): return b64_encode(salted_hmac(salt, value, key, algorithm=algorithm).digest()).decode() def get_cookie_signer(salt='django.core.signing.get_cookie_signer'): Signer = import_string(settings.SIGNING_BACKEND) key = force_bytes(settings.SECRET_KEY) # SECRET_KEY may be str or bytes. return Signer(b'django.http.cookies' + key, salt=salt) class JSONSerializer: """ Simple wrapper around json to be used in signing.dumps and signing.loads. """ def dumps(self, obj): return json.dumps(obj, separators=(',', ':')).encode('latin-1') def loads(self, data): return json.loads(data.decode('latin-1')) def dumps(obj, key=None, salt='django.core.signing', serializer=JSONSerializer, compress=False): """ Return URL-safe, hmac signed base64 compressed JSON string. If key is None, use settings.SECRET_KEY instead. The hmac algorithm is the default Signer algorithm. If compress is True (not the default), check if compressing using zlib can save some space. Prepend a '.' to signify compression. This is included in the signature, to protect against zip bombs. Salt can be used to namespace the hash, so that a signed string is only valid for a given namespace. Leaving this at the default value or re-using a salt value across different parts of your application without good cause is a security risk. The serializer is expected to return a bytestring. 
""" data = serializer().dumps(obj) # Flag for if it's been compressed or not is_compressed = False if compress: # Avoid zlib dependency unless compress is being used compressed = zlib.compress(data) if len(compressed) < (len(data) - 1): data = compressed is_compressed = True base64d = b64_encode(data).decode() if is_compressed: base64d = '.' + base64d return TimestampSigner(key, salt=salt).sign(base64d) def loads(s, key=None, salt='django.core.signing', serializer=JSONSerializer, max_age=None): """ Reverse of dumps(), raise BadSignature if signature fails. The serializer is expected to accept a bytestring. """ # TimestampSigner.unsign() returns str but base64 and zlib compression # operate on bytes. base64d = TimestampSigner(key, salt=salt).unsign(s, max_age=max_age).encode() decompress = base64d[:1] == b'.' if decompress: # It's compressed; uncompress it first base64d = base64d[1:] data = b64_decode(base64d) if decompress: data = zlib.decompress(data) return serializer().loads(data) class Signer: # RemovedInDjango40Warning. legacy_algorithm = 'sha1' def __init__(self, key=None, sep=':', salt=None, algorithm='sha256'): self.key = key or settings.SECRET_KEY self.sep = sep if _SEP_UNSAFE.match(self.sep): raise ValueError( 'Unsafe Signer separator: %r (cannot be empty or consist of ' 'only A-z0-9-_=)' % sep, ) self.salt = salt or '%s.%s' % (self.__class__.__module__, self.__class__.__name__) self.algorithm = algorithm def signature(self, value): return base64_hmac(self.salt + 'signer', value, self.key, algorithm=self.algorithm) def _legacy_signature(self, value): # RemovedInDjango40Warning. return base64_hmac(self.salt + 'signer', value, self.key, algorithm=self.legacy_algorithm) def sign(self, value): return '%s%s%s' % (value, self.sep, self.signature(value)) def unsign(self, signed_value): if self.sep not in signed_value: raise BadSignature('No "%s" found in value' % self.sep) value, sig = signed_value.rsplit(self.sep, 1) if ( constant_time_compare(sig, self.signature(value)) or ( self.legacy_algorithm and constant_time_compare(sig, self._legacy_signature(value)) ) ): return value raise BadSignature('Signature "%s" does not match' % sig) class TimestampSigner(Signer): def timestamp(self): return baseconv.base62.encode(int(time.time())) def sign(self, value): value = '%s%s%s' % (value, self.sep, self.timestamp()) return super().sign(value) def unsign(self, value, max_age=None): """ Retrieve original value and check it wasn't signed more than max_age seconds ago. """ result = super().unsign(value) value, timestamp = result.rsplit(self.sep, 1) timestamp = baseconv.base62.decode(timestamp) if max_age is not None: if isinstance(max_age, datetime.timedelta): max_age = max_age.total_seconds() # Check timestamp is not older than max_age age = time.time() - timestamp if age > max_age: raise SignatureExpired( 'Signature age %s > %s seconds' % (age, max_age)) return value
e6f8757bb2203cdefc5973e8efe84430f37db5072a252647bb458289b36077e1
from django.dispatch import Signal request_started = Signal() request_finished = Signal() got_request_exception = Signal() setting_changed = Signal()
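
# A minimal sketch of connecting a receiver to one of these signals (the
# callback below is hypothetical, not part of Django):
#
#     >>> def log_finished(sender, **kwargs):
#     ...     print('request finished for', sender)
#     >>> request_finished.connect(log_finished)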
caf7da9aee66c0a90e25c37f007f82af18068b6faa80cd523e575a1a4085f4f1
import ipaddress import re from pathlib import Path from urllib.parse import urlsplit, urlunsplit from django.core.exceptions import ValidationError from django.utils.deconstruct import deconstructible from django.utils.encoding import punycode from django.utils.ipv6 import is_valid_ipv6_address from django.utils.regex_helper import _lazy_re_compile from django.utils.translation import gettext_lazy as _, ngettext_lazy # These values, if given to validate(), will trigger the self.required check. EMPTY_VALUES = (None, '', [], (), {}) @deconstructible class RegexValidator: regex = '' message = _('Enter a valid value.') code = 'invalid' inverse_match = False flags = 0 def __init__(self, regex=None, message=None, code=None, inverse_match=None, flags=None): if regex is not None: self.regex = regex if message is not None: self.message = message if code is not None: self.code = code if inverse_match is not None: self.inverse_match = inverse_match if flags is not None: self.flags = flags if self.flags and not isinstance(self.regex, str): raise TypeError("If the flags are set, regex must be a regular expression string.") self.regex = _lazy_re_compile(self.regex, self.flags) def __call__(self, value): """ Validate that the input contains (or does *not* contain, if inverse_match is True) a match for the regular expression. """ regex_matches = self.regex.search(str(value)) invalid_input = regex_matches if self.inverse_match else not regex_matches if invalid_input: raise ValidationError(self.message, code=self.code) def __eq__(self, other): return ( isinstance(other, RegexValidator) and self.regex.pattern == other.regex.pattern and self.regex.flags == other.regex.flags and (self.message == other.message) and (self.code == other.code) and (self.inverse_match == other.inverse_match) ) @deconstructible class URLValidator(RegexValidator): ul = '\u00a1-\uffff' # Unicode letters range (must not be a raw string). # IP patterns ipv4_re = r'(?:25[0-5]|2[0-4]\d|[0-1]?\d?\d)(?:\.(?:25[0-5]|2[0-4]\d|[0-1]?\d?\d)){3}' ipv6_re = r'\[[0-9a-f:.]+\]' # (simple regex, validated later) # Host patterns hostname_re = r'[a-z' + ul + r'0-9](?:[a-z' + ul + r'0-9-]{0,61}[a-z' + ul + r'0-9])?' # Max length for domain name labels is 63 characters per RFC 1034 sec. 3.1 domain_re = r'(?:\.(?!-)[a-z' + ul + r'0-9-]{1,63}(?<!-))*' tld_re = ( r'\.' # dot r'(?!-)' # can't start with a dash r'(?:[a-z' + ul + '-]{2,63}' # domain label r'|xn--[a-z0-9]{1,59})' # or punycode label r'(?<!-)' # can't end with a dash r'\.?' # may have a trailing dot ) host_re = '(' + hostname_re + domain_re + tld_re + '|localhost)' regex = _lazy_re_compile( r'^(?:[a-z0-9.+-]*)://' # scheme is validated separately r'(?:[^\s:@/]+(?::[^\s:@/]*)?@)?' # user:pass authentication r'(?:' + ipv4_re + '|' + ipv6_re + '|' + host_re + ')' r'(?::\d{2,5})?' # port r'(?:[/?#][^\s]*)?' # resource path r'\Z', re.IGNORECASE) message = _('Enter a valid URL.') schemes = ['http', 'https', 'ftp', 'ftps'] def __init__(self, schemes=None, **kwargs): super().__init__(**kwargs) if schemes is not None: self.schemes = schemes def __call__(self, value): # Check first if the scheme is valid scheme = value.split('://')[0].lower() if scheme not in self.schemes: raise ValidationError(self.message, code=self.code) # Then check full URL try: super().__call__(value) except ValidationError as e: # Trivial case failed. 
Try for possible IDN domain if value: try: scheme, netloc, path, query, fragment = urlsplit(value) except ValueError: # for example, "Invalid IPv6 URL" raise ValidationError(self.message, code=self.code) try: netloc = punycode(netloc) # IDN -> ACE except UnicodeError: # invalid domain part raise e url = urlunsplit((scheme, netloc, path, query, fragment)) super().__call__(url) else: raise else: # Now verify IPv6 in the netloc part host_match = re.search(r'^\[(.+)\](?::\d{2,5})?$', urlsplit(value).netloc) if host_match: potential_ip = host_match.groups()[0] try: validate_ipv6_address(potential_ip) except ValidationError: raise ValidationError(self.message, code=self.code) # The maximum length of a full host name is 253 characters per RFC 1034 # section 3.1. It's defined to be 255 bytes or less, but this includes # one byte for the length of the name and one byte for the trailing dot # that's used to indicate absolute names in DNS. if len(urlsplit(value).netloc) > 253: raise ValidationError(self.message, code=self.code) integer_validator = RegexValidator( _lazy_re_compile(r'^-?\d+\Z'), message=_('Enter a valid integer.'), code='invalid', ) def validate_integer(value): return integer_validator(value) @deconstructible class EmailValidator: message = _('Enter a valid email address.') code = 'invalid' user_regex = _lazy_re_compile( r"(^[-!#$%&'*+/=?^_`{}|~0-9A-Z]+(\.[-!#$%&'*+/=?^_`{}|~0-9A-Z]+)*\Z" # dot-atom r'|^"([\001-\010\013\014\016-\037!#-\[\]-\177]|\\[\001-\011\013\014\016-\177])*"\Z)', # quoted-string re.IGNORECASE) domain_regex = _lazy_re_compile( # max length for domain name labels is 63 characters per RFC 1034 r'((?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+)(?:[A-Z0-9-]{2,63}(?<!-))\Z', re.IGNORECASE) literal_regex = _lazy_re_compile( # literal form, ipv4 or ipv6 address (SMTP 4.1.3) r'\[([A-f0-9:.]+)\]\Z', re.IGNORECASE) domain_whitelist = ['localhost'] def __init__(self, message=None, code=None, whitelist=None): if message is not None: self.message = message if code is not None: self.code = code if whitelist is not None: self.domain_whitelist = whitelist def __call__(self, value): if not value or '@' not in value: raise ValidationError(self.message, code=self.code) user_part, domain_part = value.rsplit('@', 1) if not self.user_regex.match(user_part): raise ValidationError(self.message, code=self.code) if (domain_part not in self.domain_whitelist and not self.validate_domain_part(domain_part)): # Try for possible IDN domain-part try: domain_part = punycode(domain_part) except UnicodeError: pass else: if self.validate_domain_part(domain_part): return raise ValidationError(self.message, code=self.code) def validate_domain_part(self, domain_part): if self.domain_regex.match(domain_part): return True literal_match = self.literal_regex.match(domain_part) if literal_match: ip_address = literal_match.group(1) try: validate_ipv46_address(ip_address) return True except ValidationError: pass return False def __eq__(self, other): return ( isinstance(other, EmailValidator) and (self.domain_whitelist == other.domain_whitelist) and (self.message == other.message) and (self.code == other.code) ) validate_email = EmailValidator() slug_re = _lazy_re_compile(r'^[-a-zA-Z0-9_]+\Z') validate_slug = RegexValidator( slug_re, # Translators: "letters" means latin letters: a-z and A-Z. 
_('Enter a valid “slug” consisting of letters, numbers, underscores or hyphens.'), 'invalid' ) slug_unicode_re = _lazy_re_compile(r'^[-\w]+\Z') validate_unicode_slug = RegexValidator( slug_unicode_re, _('Enter a valid “slug” consisting of Unicode letters, numbers, underscores, or hyphens.'), 'invalid' ) def validate_ipv4_address(value): try: ipaddress.IPv4Address(value) except ValueError: raise ValidationError(_('Enter a valid IPv4 address.'), code='invalid') def validate_ipv6_address(value): if not is_valid_ipv6_address(value): raise ValidationError(_('Enter a valid IPv6 address.'), code='invalid') def validate_ipv46_address(value): try: validate_ipv4_address(value) except ValidationError: try: validate_ipv6_address(value) except ValidationError: raise ValidationError(_('Enter a valid IPv4 or IPv6 address.'), code='invalid') ip_address_validator_map = { 'both': ([validate_ipv46_address], _('Enter a valid IPv4 or IPv6 address.')), 'ipv4': ([validate_ipv4_address], _('Enter a valid IPv4 address.')), 'ipv6': ([validate_ipv6_address], _('Enter a valid IPv6 address.')), } def ip_address_validators(protocol, unpack_ipv4): """ Depending on the given parameters, return the appropriate validators for the GenericIPAddressField. """ if protocol != 'both' and unpack_ipv4: raise ValueError( "You can only use `unpack_ipv4` if `protocol` is set to 'both'") try: return ip_address_validator_map[protocol.lower()] except KeyError: raise ValueError("The protocol '%s' is unknown. Supported: %s" % (protocol, list(ip_address_validator_map))) def int_list_validator(sep=',', message=None, code='invalid', allow_negative=False): regexp = _lazy_re_compile(r'^%(neg)s\d+(?:%(sep)s%(neg)s\d+)*\Z' % { 'neg': '(-)?' if allow_negative else '', 'sep': re.escape(sep), }) return RegexValidator(regexp, message=message, code=code) validate_comma_separated_integer_list = int_list_validator( message=_('Enter only digits separated by commas.'), ) @deconstructible class BaseValidator: message = _('Ensure this value is %(limit_value)s (it is %(show_value)s).') code = 'limit_value' def __init__(self, limit_value, message=None): self.limit_value = limit_value if message: self.message = message def __call__(self, value): cleaned = self.clean(value) limit_value = self.limit_value() if callable(self.limit_value) else self.limit_value params = {'limit_value': limit_value, 'show_value': cleaned, 'value': value} if self.compare(cleaned, limit_value): raise ValidationError(self.message, code=self.code, params=params) def __eq__(self, other): if not isinstance(other, self.__class__): return NotImplemented return ( self.limit_value == other.limit_value and self.message == other.message and self.code == other.code ) def compare(self, a, b): return a is not b def clean(self, x): return x @deconstructible class MaxValueValidator(BaseValidator): message = _('Ensure this value is less than or equal to %(limit_value)s.') code = 'max_value' def compare(self, a, b): return a > b @deconstructible class MinValueValidator(BaseValidator): message = _('Ensure this value is greater than or equal to %(limit_value)s.') code = 'min_value' def compare(self, a, b): return a < b @deconstructible class MinLengthValidator(BaseValidator): message = ngettext_lazy( 'Ensure this value has at least %(limit_value)d character (it has %(show_value)d).', 'Ensure this value has at least %(limit_value)d characters (it has %(show_value)d).', 'limit_value') code = 'min_length' def compare(self, a, b): return a < b def clean(self, x): return len(x) @deconstructible class 
MaxLengthValidator(BaseValidator): message = ngettext_lazy( 'Ensure this value has at most %(limit_value)d character (it has %(show_value)d).', 'Ensure this value has at most %(limit_value)d characters (it has %(show_value)d).', 'limit_value') code = 'max_length' def compare(self, a, b): return a > b def clean(self, x): return len(x) @deconstructible class DecimalValidator: """ Validate that the input does not exceed the maximum number of digits expected, otherwise raise ValidationError. """ messages = { 'invalid': _('Enter a number.'), 'max_digits': ngettext_lazy( 'Ensure that there are no more than %(max)s digit in total.', 'Ensure that there are no more than %(max)s digits in total.', 'max' ), 'max_decimal_places': ngettext_lazy( 'Ensure that there are no more than %(max)s decimal place.', 'Ensure that there are no more than %(max)s decimal places.', 'max' ), 'max_whole_digits': ngettext_lazy( 'Ensure that there are no more than %(max)s digit before the decimal point.', 'Ensure that there are no more than %(max)s digits before the decimal point.', 'max' ), } def __init__(self, max_digits, decimal_places): self.max_digits = max_digits self.decimal_places = decimal_places def __call__(self, value): digit_tuple, exponent = value.as_tuple()[1:] if exponent in {'F', 'n', 'N'}: raise ValidationError(self.messages['invalid']) if exponent >= 0: # A positive exponent adds that many trailing zeros. digits = len(digit_tuple) + exponent decimals = 0 else: # If the absolute value of the negative exponent is larger than the # number of digits, then it's the same as the number of digits, # because it'll consume all of the digits in digit_tuple and then # add abs(exponent) - len(digit_tuple) leading zeros after the # decimal point. if abs(exponent) > len(digit_tuple): digits = decimals = abs(exponent) else: digits = len(digit_tuple) decimals = abs(exponent) whole_digits = digits - decimals if self.max_digits is not None and digits > self.max_digits: raise ValidationError( self.messages['max_digits'], code='max_digits', params={'max': self.max_digits}, ) if self.decimal_places is not None and decimals > self.decimal_places: raise ValidationError( self.messages['max_decimal_places'], code='max_decimal_places', params={'max': self.decimal_places}, ) if (self.max_digits is not None and self.decimal_places is not None and whole_digits > (self.max_digits - self.decimal_places)): raise ValidationError( self.messages['max_whole_digits'], code='max_whole_digits', params={'max': (self.max_digits - self.decimal_places)}, ) def __eq__(self, other): return ( isinstance(other, self.__class__) and self.max_digits == other.max_digits and self.decimal_places == other.decimal_places ) @deconstructible class FileExtensionValidator: message = _( 'File extension “%(extension)s” is not allowed. ' 'Allowed extensions are: %(allowed_extensions)s.' 
) code = 'invalid_extension' def __init__(self, allowed_extensions=None, message=None, code=None): if allowed_extensions is not None: allowed_extensions = [allowed_extension.lower() for allowed_extension in allowed_extensions] self.allowed_extensions = allowed_extensions if message is not None: self.message = message if code is not None: self.code = code def __call__(self, value): extension = Path(value.name).suffix[1:].lower() if self.allowed_extensions is not None and extension not in self.allowed_extensions: raise ValidationError( self.message, code=self.code, params={ 'extension': extension, 'allowed_extensions': ', '.join(self.allowed_extensions) } ) def __eq__(self, other): return ( isinstance(other, self.__class__) and self.allowed_extensions == other.allowed_extensions and self.message == other.message and self.code == other.code ) def get_available_image_extensions(): try: from PIL import Image except ImportError: return [] else: Image.init() return [ext.lower()[1:] for ext in Image.EXTENSION] def validate_image_file_extension(value): return FileExtensionValidator(allowed_extensions=get_available_image_extensions())(value) @deconstructible class ProhibitNullCharactersValidator: """Validate that the string doesn't contain the null character.""" message = _('Null characters are not allowed.') code = 'null_characters_not_allowed' def __init__(self, message=None, code=None): if message is not None: self.message = message if code is not None: self.code = code def __call__(self, value): if '\x00' in str(value): raise ValidationError(self.message, code=self.code) def __eq__(self, other): return ( isinstance(other, self.__class__) and self.message == other.message and self.code == other.code )
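
# Illustrative usage sketch for the validators above (sample values are
# hypothetical); validators pass silently and raise ValidationError on failure:
#
#     >>> validate_email('user@example.com')    # no exception: valid
#     >>> try:
#     ...     MaxLengthValidator(3)('abcd')
#     ... except ValidationError as e:
#     ...     e.code
#     'max_length'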
b41f83e619c2fb543553df53f719636076a6073a04487955e88654cb871ecbdb
""" Multi-part parsing for file uploads. Exposes one class, ``MultiPartParser``, which feeds chunks of uploaded data to file upload handlers for processing. """ import base64 import binascii import cgi import collections import html from urllib.parse import unquote from django.conf import settings from django.core.exceptions import ( RequestDataTooBig, SuspiciousMultipartForm, TooManyFieldsSent, ) from django.core.files.uploadhandler import ( SkipFile, StopFutureHandlers, StopUpload, ) from django.utils.datastructures import MultiValueDict from django.utils.encoding import force_str __all__ = ('MultiPartParser', 'MultiPartParserError', 'InputStreamExhausted') class MultiPartParserError(Exception): pass class InputStreamExhausted(Exception): """ No more reads are allowed from this device. """ pass RAW = "raw" FILE = "file" FIELD = "field" class MultiPartParser: """ A rfc2388 multipart/form-data parser. ``MultiValueDict.parse()`` reads the input stream in ``chunk_size`` chunks and returns a tuple of ``(MultiValueDict(POST), MultiValueDict(FILES))``. """ def __init__(self, META, input_data, upload_handlers, encoding=None): """ Initialize the MultiPartParser object. :META: The standard ``META`` dictionary in Django request objects. :input_data: The raw post data, as a file-like object. :upload_handlers: A list of UploadHandler instances that perform operations on the uploaded data. :encoding: The encoding with which to treat the incoming data. """ # Content-Type should contain multipart and the boundary information. content_type = META.get('CONTENT_TYPE', '') if not content_type.startswith('multipart/'): raise MultiPartParserError('Invalid Content-Type: %s' % content_type) # Parse the header to get the boundary to split the parts. try: ctypes, opts = parse_header(content_type.encode('ascii')) except UnicodeEncodeError: raise MultiPartParserError('Invalid non-ASCII Content-Type in multipart: %s' % force_str(content_type)) boundary = opts.get('boundary') if not boundary or not cgi.valid_boundary(boundary): raise MultiPartParserError('Invalid boundary in multipart: %s' % force_str(boundary)) # Content-Length should contain the length of the body we are about # to receive. try: content_length = int(META.get('CONTENT_LENGTH', 0)) except (ValueError, TypeError): content_length = 0 if content_length < 0: # This means we shouldn't continue...raise an error. raise MultiPartParserError("Invalid content length: %r" % content_length) if isinstance(boundary, str): boundary = boundary.encode('ascii') self._boundary = boundary self._input_data = input_data # For compatibility with low-level network APIs (with 32-bit integers), # the chunk size should be < 2^31, but still divisible by 4. possible_sizes = [x.chunk_size for x in upload_handlers if x.chunk_size] self._chunk_size = min([2 ** 31 - 4] + possible_sizes) self._meta = META self._encoding = encoding or settings.DEFAULT_CHARSET self._content_length = content_length self._upload_handlers = upload_handlers def parse(self): """ Parse the POST data and break it into a FILES MultiValueDict and a POST MultiValueDict. Return a tuple containing the POST and FILES dictionary, respectively. """ from django.http import QueryDict encoding = self._encoding handlers = self._upload_handlers # HTTP spec says that Content-Length >= 0 is valid # handling content-length == 0 before continuing if self._content_length == 0: return QueryDict(encoding=self._encoding), MultiValueDict() # See if any of the handlers take care of the parsing. 
# This allows overriding everything if need be. for handler in handlers: result = handler.handle_raw_input( self._input_data, self._meta, self._content_length, self._boundary, encoding, ) # Check to see if it was handled if result is not None: return result[0], result[1] # Create the data structures to be used later. self._post = QueryDict(mutable=True) self._files = MultiValueDict() # Instantiate the parser and stream: stream = LazyStream(ChunkIter(self._input_data, self._chunk_size)) # Whether or not to signal a file-completion at the beginning of the loop. old_field_name = None counters = [0] * len(handlers) # Number of bytes that have been read. num_bytes_read = 0 # To count the number of keys in the request. num_post_keys = 0 # To limit the amount of data read from the request. read_size = None try: for item_type, meta_data, field_stream in Parser(stream, self._boundary): if old_field_name: # We run this at the beginning of the next loop # since we cannot be sure a file is complete until # we hit the next boundary/part of the multipart content. self.handle_file_complete(old_field_name, counters) old_field_name = None try: disposition = meta_data['content-disposition'][1] field_name = disposition['name'].strip() except (KeyError, IndexError, AttributeError): continue transfer_encoding = meta_data.get('content-transfer-encoding') if transfer_encoding is not None: transfer_encoding = transfer_encoding[0].strip() field_name = force_str(field_name, encoding, errors='replace') if item_type == FIELD: # Avoid storing more than DATA_UPLOAD_MAX_NUMBER_FIELDS. num_post_keys += 1 if (settings.DATA_UPLOAD_MAX_NUMBER_FIELDS is not None and settings.DATA_UPLOAD_MAX_NUMBER_FIELDS < num_post_keys): raise TooManyFieldsSent( 'The number of GET/POST parameters exceeded ' 'settings.DATA_UPLOAD_MAX_NUMBER_FIELDS.' ) # Avoid reading more than DATA_UPLOAD_MAX_MEMORY_SIZE. if settings.DATA_UPLOAD_MAX_MEMORY_SIZE is not None: read_size = settings.DATA_UPLOAD_MAX_MEMORY_SIZE - num_bytes_read # This is a post field, we can just set it in the post if transfer_encoding == 'base64': raw_data = field_stream.read(size=read_size) num_bytes_read += len(raw_data) try: data = base64.b64decode(raw_data) except binascii.Error: data = raw_data else: data = field_stream.read(size=read_size) num_bytes_read += len(data) # Add two here to make the check consistent with the # x-www-form-urlencoded check that includes '&='. num_bytes_read += len(field_name) + 2 if (settings.DATA_UPLOAD_MAX_MEMORY_SIZE is not None and num_bytes_read > settings.DATA_UPLOAD_MAX_MEMORY_SIZE): raise RequestDataTooBig('Request body exceeded settings.DATA_UPLOAD_MAX_MEMORY_SIZE.') self._post.appendlist(field_name, force_str(data, encoding, errors='replace')) elif item_type == FILE: # This is a file, use the handler... 
file_name = disposition.get('filename') if file_name: file_name = force_str(file_name, encoding, errors='replace') file_name = self.IE_sanitize(html.unescape(file_name)) if not file_name: continue content_type, content_type_extra = meta_data.get('content-type', ('', {})) content_type = content_type.strip() charset = content_type_extra.get('charset') try: content_length = int(meta_data.get('content-length')[0]) except (IndexError, TypeError, ValueError): content_length = None counters = [0] * len(handlers) try: for handler in handlers: try: handler.new_file( field_name, file_name, content_type, content_length, charset, content_type_extra, ) except StopFutureHandlers: break for chunk in field_stream: if transfer_encoding == 'base64': # We only special-case base64 transfer encoding # We should always decode base64 chunks by multiple of 4, # ignoring whitespace. stripped_chunk = b"".join(chunk.split()) remaining = len(stripped_chunk) % 4 while remaining != 0: over_chunk = field_stream.read(4 - remaining) stripped_chunk += b"".join(over_chunk.split()) remaining = len(stripped_chunk) % 4 try: chunk = base64.b64decode(stripped_chunk) except Exception as exc: # Since this is only a chunk, any error is an unfixable error. raise MultiPartParserError("Could not decode base64 data.") from exc for i, handler in enumerate(handlers): chunk_length = len(chunk) chunk = handler.receive_data_chunk(chunk, counters[i]) counters[i] += chunk_length if chunk is None: # Don't continue if the chunk received by # the handler is None. break except SkipFile: self._close_files() # Just use up the rest of this file... exhaust(field_stream) else: # Handle file upload completions on next iteration. old_field_name = field_name else: # If this is neither a FIELD or a FILE, just exhaust the stream. exhaust(stream) except StopUpload as e: self._close_files() if not e.connection_reset: exhaust(self._input_data) else: # Make sure that the request data is all fed exhaust(self._input_data) # Signal that the upload has completed. # any() shortcircuits if a handler's upload_complete() returns a value. any(handler.upload_complete() for handler in handlers) self._post._mutable = False return self._post, self._files def handle_file_complete(self, old_field_name, counters): """ Handle all the signaling that takes place when a file is complete. """ for i, handler in enumerate(self._upload_handlers): file_obj = handler.file_complete(counters[i]) if file_obj: # If it returns a file object, then set the files dict. self._files.appendlist(force_str(old_field_name, self._encoding, errors='replace'), file_obj) break def IE_sanitize(self, filename): """Cleanup filename from Internet Explorer full paths.""" return filename and filename[filename.rfind("\\") + 1:].strip() def _close_files(self): # Free up all file handles. # FIXME: this currently assumes that upload handlers store the file as 'file' # We should document that... (Maybe add handler.free_file to complement new_file) for handler in self._upload_handlers: if hasattr(handler, 'file'): handler.file.close() class LazyStream: """ The LazyStream wrapper allows one to get and "unget" bytes from a stream. Given a producer object (an iterator that yields bytestrings), the LazyStream object will support iteration, reading, and keeping a "look-back" variable in case you need to "unget" some bytes. """ def __init__(self, producer, length=None): """ Every LazyStream must have a producer when instantiated. A producer is an iterable that returns a string each time it is called. 
""" self._producer = producer self._empty = False self._leftover = b'' self.length = length self.position = 0 self._remaining = length self._unget_history = [] def tell(self): return self.position def read(self, size=None): def parts(): remaining = self._remaining if size is None else size # do the whole thing in one shot if no limit was provided. if remaining is None: yield b''.join(self) return # otherwise do some bookkeeping to return exactly enough # of the stream and stashing any extra content we get from # the producer while remaining != 0: assert remaining > 0, 'remaining bytes to read should never go negative' try: chunk = next(self) except StopIteration: return else: emitting = chunk[:remaining] self.unget(chunk[remaining:]) remaining -= len(emitting) yield emitting return b''.join(parts()) def __next__(self): """ Used when the exact number of bytes to read is unimportant. Return whatever chunk is conveniently returned from the iterator. Useful to avoid unnecessary bookkeeping if performance is an issue. """ if self._leftover: output = self._leftover self._leftover = b'' else: output = next(self._producer) self._unget_history = [] self.position += len(output) return output def close(self): """ Used to invalidate/disable this lazy stream. Replace the producer with an empty list. Any leftover bytes that have already been read will still be reported upon read() and/or next(). """ self._producer = [] def __iter__(self): return self def unget(self, bytes): """ Place bytes back onto the front of the lazy stream. Future calls to read() will return those bytes first. The stream position and thus tell() will be rewound. """ if not bytes: return self._update_unget_history(len(bytes)) self.position -= len(bytes) self._leftover = bytes + self._leftover def _update_unget_history(self, num_bytes): """ Update the unget history as a sanity check to see if we've pushed back the same number of bytes in one chunk. If we keep ungetting the same number of bytes many times (here, 50), we're mostly likely in an infinite loop of some sort. This is usually caused by a maliciously-malformed MIME request. """ self._unget_history = [num_bytes] + self._unget_history[:49] number_equal = len([ current_number for current_number in self._unget_history if current_number == num_bytes ]) if number_equal > 40: raise SuspiciousMultipartForm( "The multipart parser got stuck, which shouldn't happen with" " normal uploaded files. Check for malicious upload activity;" " if there is none, report this to the Django developers." ) class ChunkIter: """ An iterable that will yield chunks of data. Given a file-like object as the constructor, yield chunks of read operations from that object. """ def __init__(self, flo, chunk_size=64 * 1024): self.flo = flo self.chunk_size = chunk_size def __next__(self): try: data = self.flo.read(self.chunk_size) except InputStreamExhausted: raise StopIteration() if data: return data else: raise StopIteration() def __iter__(self): return self class InterBoundaryIter: """ A Producer that will iterate over boundaries. """ def __init__(self, stream, boundary): self._stream = stream self._boundary = boundary def __iter__(self): return self def __next__(self): try: return LazyStream(BoundaryIter(self._stream, self._boundary)) except InputStreamExhausted: raise StopIteration() class BoundaryIter: """ A Producer that is sensitive to boundaries. Will happily yield bytes until a boundary is found. 
Will yield the bytes before the boundary, throw away the boundary bytes themselves, and push the post-boundary bytes back on the stream. The future calls to next() after locating the boundary will raise a StopIteration exception. """ def __init__(self, stream, boundary): self._stream = stream self._boundary = boundary self._done = False # rollback an additional six bytes because the format is like # this: CRLF<boundary>[--CRLF] self._rollback = len(boundary) + 6 # Try to use mx fast string search if available. Otherwise # use Python find. Wrap the latter for consistency. unused_char = self._stream.read(1) if not unused_char: raise InputStreamExhausted() self._stream.unget(unused_char) def __iter__(self): return self def __next__(self): if self._done: raise StopIteration() stream = self._stream rollback = self._rollback bytes_read = 0 chunks = [] for bytes in stream: bytes_read += len(bytes) chunks.append(bytes) if bytes_read > rollback: break if not bytes: break else: self._done = True if not chunks: raise StopIteration() chunk = b''.join(chunks) boundary = self._find_boundary(chunk) if boundary: end, next = boundary stream.unget(chunk[next:]) self._done = True return chunk[:end] else: # make sure we don't treat a partial boundary (and # its separators) as data if not chunk[:-rollback]: # and len(chunk) >= (len(self._boundary) + 6): # There's nothing left, we should just return and mark as done. self._done = True return chunk else: stream.unget(chunk[-rollback:]) return chunk[:-rollback] def _find_boundary(self, data): """ Find a multipart boundary in data. Should no boundary exist in the data, return None. Otherwise, return a tuple containing the indices of the following: * the end of current encapsulation * the start of the next encapsulation """ index = data.find(self._boundary) if index < 0: return None else: end = index next = index + len(self._boundary) # backup over CRLF last = max(0, end - 1) if data[last:last + 1] == b'\n': end -= 1 last = max(0, end - 1) if data[last:last + 1] == b'\r': end -= 1 return end, next def exhaust(stream_or_iterable): """Exhaust an iterator or stream.""" try: iterator = iter(stream_or_iterable) except TypeError: iterator = ChunkIter(stream_or_iterable, 16384) collections.deque(iterator, maxlen=0) # consume iterator quickly. def parse_boundary_stream(stream, max_header_size): """ Parse one and exactly one stream that encapsulates a boundary. """ # Stream at beginning of header, look for end of header # and parse it if found. The header must fit within one # chunk. chunk = stream.read(max_header_size) # 'find' returns the top of these four bytes, so we'll # need to munch them later to prevent them from polluting # the payload. header_end = chunk.find(b'\r\n\r\n') def _parse_header(line): main_value_pair, params = parse_header(line) try: name, value = main_value_pair.split(':', 1) except ValueError: raise ValueError("Invalid header: %r" % line) return name, (value, params) if header_end == -1: # we find no header, so we just mark this fact and pass on # the stream verbatim stream.unget(chunk) return (RAW, {}, stream) header = chunk[:header_end] # here we place any excess chunk back onto the stream, as # well as throwing away the CRLFCRLF bytes from above. stream.unget(chunk[header_end + 4:]) TYPE = RAW outdict = {} # Eliminate blank lines for line in header.split(b'\r\n'): # This terminology ("main value" and "dictionary of # parameters") is from the Python docs. 
try: name, (value, params) = _parse_header(line) except ValueError: continue if name == 'content-disposition': TYPE = FIELD if params.get('filename'): TYPE = FILE outdict[name] = value, params if TYPE == RAW: stream.unget(chunk) return (TYPE, outdict, stream) class Parser: def __init__(self, stream, boundary): self._stream = stream self._separator = b'--' + boundary def __iter__(self): boundarystream = InterBoundaryIter(self._stream, self._separator) for sub_stream in boundarystream: # Iterate over each part yield parse_boundary_stream(sub_stream, 1024) def parse_header(line): """ Parse the header into a key-value. Input (line): bytes, output: str for key/name, bytes for values which will be decoded later. """ plist = _parse_header_params(b';' + line) key = plist.pop(0).lower().decode('ascii') pdict = {} for p in plist: i = p.find(b'=') if i >= 0: has_encoding = False name = p[:i].strip().lower().decode('ascii') if name.endswith('*'): # Lang/encoding embedded in the value (like "filename*=UTF-8''file.ext") # http://tools.ietf.org/html/rfc2231#section-4 name = name[:-1] if p.count(b"'") == 2: has_encoding = True value = p[i + 1:].strip() if len(value) >= 2 and value[:1] == value[-1:] == b'"': value = value[1:-1] value = value.replace(b'\\\\', b'\\').replace(b'\\"', b'"') if has_encoding: encoding, lang, value = value.split(b"'") value = unquote(value.decode(), encoding=encoding.decode()) pdict[name] = value return key, pdict def _parse_header_params(s): plist = [] while s[:1] == b';': s = s[1:] end = s.find(b';') while end > 0 and s.count(b'"', 0, end) % 2: end = s.find(b';', end + 1) if end < 0: end = len(s) f = s[:end] plist.append(f.strip()) s = s[end:] return plist
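
# A minimal sketch of parse_header() above, as used to extract
# Content-Disposition parameters (the header bytes are hypothetical):
#
#     >>> parse_header(b'form-data; name="avatar"; filename="a.png"')
#     ('form-data', {'name': b'avatar', 'filename': b'a.png'})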
58e385a9312b3c64c99d8bd8aeabe42bd54b7ce47964bfa1a53cbabbcd5df108
import datetime import json import mimetypes import os import re import sys import time from email.header import Header from http.client import responses from urllib.parse import quote, urlparse from django.conf import settings from django.core import signals, signing from django.core.exceptions import DisallowedRedirect from django.core.serializers.json import DjangoJSONEncoder from django.http.cookie import SimpleCookie from django.utils import timezone from django.utils.encoding import iri_to_uri from django.utils.http import http_date from django.utils.regex_helper import _lazy_re_compile _charset_from_content_type_re = _lazy_re_compile(r';\s*charset=(?P<charset>[^\s;]+)', re.I) class BadHeaderError(ValueError): pass class HttpResponseBase: """ An HTTP response base class with dictionary-accessed headers. This class doesn't handle content. It should not be used directly. Use the HttpResponse and StreamingHttpResponse subclasses instead. """ status_code = 200 def __init__(self, content_type=None, status=None, reason=None, charset=None): # _headers is a mapping of the lowercase name to the original case of # the header (required for working with legacy systems) and the header # value. Both the name of the header and its value are ASCII strings. self._headers = {} self._resource_closers = [] # This parameter is set by the handler. It's necessary to preserve the # historical behavior of request_finished. self._handler_class = None self.cookies = SimpleCookie() self.closed = False if status is not None: try: self.status_code = int(status) except (ValueError, TypeError): raise TypeError('HTTP status code must be an integer.') if not 100 <= self.status_code <= 599: raise ValueError('HTTP status code must be an integer from 100 to 599.') self._reason_phrase = reason self._charset = charset if content_type is None: content_type = 'text/html; charset=%s' % self.charset self['Content-Type'] = content_type @property def reason_phrase(self): if self._reason_phrase is not None: return self._reason_phrase # Leave self._reason_phrase unset in order to use the default # reason phrase for status code. return responses.get(self.status_code, 'Unknown Status Code') @reason_phrase.setter def reason_phrase(self, value): self._reason_phrase = value @property def charset(self): if self._charset is not None: return self._charset content_type = self.get('Content-Type', '') matched = _charset_from_content_type_re.search(content_type) if matched: # Extract the charset and strip its double quotes return matched.group('charset').replace('"', '') return settings.DEFAULT_CHARSET @charset.setter def charset(self, value): self._charset = value def serialize_headers(self): """HTTP headers as a bytestring.""" def to_bytes(val, encoding): return val if isinstance(val, bytes) else val.encode(encoding) headers = [ (to_bytes(key, 'ascii') + b': ' + to_bytes(value, 'latin-1')) for key, value in self._headers.values() ] return b'\r\n'.join(headers) __bytes__ = serialize_headers @property def _content_type_for_repr(self): return ', "%s"' % self['Content-Type'] if 'Content-Type' in self else '' def _convert_to_charset(self, value, charset, mime_encode=False): """ Convert headers key/value to ascii/latin-1 native strings. `charset` must be 'ascii' or 'latin-1'. If `mime_encode` is True and `value` can't be represented in the given charset, apply MIME-encoding. 
""" if not isinstance(value, (bytes, str)): value = str(value) if ((isinstance(value, bytes) and (b'\n' in value or b'\r' in value)) or isinstance(value, str) and ('\n' in value or '\r' in value)): raise BadHeaderError("Header values can't contain newlines (got %r)" % value) try: if isinstance(value, str): # Ensure string is valid in given charset value.encode(charset) else: # Convert bytestring using given charset value = value.decode(charset) except UnicodeError as e: if mime_encode: value = Header(value, 'utf-8', maxlinelen=sys.maxsize).encode() else: e.reason += ', HTTP response headers must be in %s format' % charset raise return value def __setitem__(self, header, value): header = self._convert_to_charset(header, 'ascii') value = self._convert_to_charset(value, 'latin-1', mime_encode=True) self._headers[header.lower()] = (header, value) def __delitem__(self, header): self._headers.pop(header.lower(), False) def __getitem__(self, header): return self._headers[header.lower()][1] def has_header(self, header): """Case-insensitive check for a header.""" return header.lower() in self._headers __contains__ = has_header def items(self): return self._headers.values() def get(self, header, alternate=None): return self._headers.get(header.lower(), (None, alternate))[1] def set_cookie(self, key, value='', max_age=None, expires=None, path='/', domain=None, secure=False, httponly=False, samesite=None): """ Set a cookie. ``expires`` can be: - a string in the correct format, - a naive ``datetime.datetime`` object in UTC, - an aware ``datetime.datetime`` object in any time zone. If it is a ``datetime.datetime`` object then calculate ``max_age``. """ self.cookies[key] = value if expires is not None: if isinstance(expires, datetime.datetime): if timezone.is_aware(expires): expires = timezone.make_naive(expires, timezone.utc) delta = expires - expires.utcnow() # Add one second so the date matches exactly (a fraction of # time gets lost between converting to a timedelta and # then the date string). delta = delta + datetime.timedelta(seconds=1) # Just set max_age - the max_age logic will set expires. expires = None max_age = max(0, delta.days * 86400 + delta.seconds) else: self.cookies[key]['expires'] = expires else: self.cookies[key]['expires'] = '' if max_age is not None: self.cookies[key]['max-age'] = max_age # IE requires expires, so set it if hasn't been already. if not expires: self.cookies[key]['expires'] = http_date(time.time() + max_age) if path is not None: self.cookies[key]['path'] = path if domain is not None: self.cookies[key]['domain'] = domain if secure: self.cookies[key]['secure'] = True if httponly: self.cookies[key]['httponly'] = True if samesite: if samesite.lower() not in ('lax', 'none', 'strict'): raise ValueError('samesite must be "lax", "none", or "strict".') self.cookies[key]['samesite'] = samesite def setdefault(self, key, value): """Set a header unless it has already been set.""" if key not in self: self[key] = value def set_signed_cookie(self, key, value, salt='', **kwargs): value = signing.get_cookie_signer(salt=key + salt).sign(value) return self.set_cookie(key, value, **kwargs) def delete_cookie(self, key, path='/', domain=None): # Most browsers ignore the Set-Cookie header if the cookie name starts # with __Host- or __Secure- and the cookie doesn't use the secure flag. 
        secure = key.startswith(('__Secure-', '__Host-'))
        self.set_cookie(
            key, max_age=0, path=path, domain=domain, secure=secure,
            expires='Thu, 01 Jan 1970 00:00:00 GMT',
        )

    # Common methods used by subclasses

    def make_bytes(self, value):
        """Turn a value into a bytestring encoded in the output charset."""
        # Per PEP 3333, this response body must be bytes. To avoid returning
        # an instance of a subclass, this function returns `bytes(value)`.
        # This doesn't make a copy when `value` already contains bytes.

        # Handle string types -- we can't rely on force_bytes here because:
        # - Python attempts str conversion first
        # - when self._charset != 'utf-8' it re-encodes the content
        if isinstance(value, (bytes, memoryview)):
            return bytes(value)
        if isinstance(value, str):
            return bytes(value.encode(self.charset))
        # Handle non-string types.
        return str(value).encode(self.charset)

    # These methods partially implement the file-like object interface.
    # See https://docs.python.org/library/io.html#io.IOBase

    # The WSGI server must call this method upon completion of the request.
    # See http://blog.dscpl.com.au/2012/10/obligations-for-calling-close-on.html
    def close(self):
        for closer in self._resource_closers:
            try:
                closer()
            except Exception:
                pass
        # Free resources that were still referenced.
        self._resource_closers.clear()
        self.closed = True
        signals.request_finished.send(sender=self._handler_class)

    def write(self, content):
        raise OSError('This %s instance is not writable' % self.__class__.__name__)

    def flush(self):
        pass

    def tell(self):
        raise OSError('This %s instance cannot tell its position' % self.__class__.__name__)

    # These methods partially implement a stream-like object interface.
    # See https://docs.python.org/library/io.html#io.IOBase

    def readable(self):
        return False

    def seekable(self):
        return False

    def writable(self):
        return False

    def writelines(self, lines):
        raise OSError('This %s instance is not writable' % self.__class__.__name__)


class HttpResponse(HttpResponseBase):
    """
    An HTTP response class with a string as content.

    This content can be read, appended to, or replaced.
    """

    streaming = False

    def __init__(self, content=b'', *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Content is a bytestring. See the `content` property methods.
        self.content = content

    def __repr__(self):
        return '<%(cls)s status_code=%(status_code)d%(content_type)s>' % {
            'cls': self.__class__.__name__,
            'status_code': self.status_code,
            'content_type': self._content_type_for_repr,
        }

    def serialize(self):
        """Full HTTP message, including headers, as a bytestring."""
        return self.serialize_headers() + b'\r\n\r\n' + self.content

    __bytes__ = serialize

    @property
    def content(self):
        return b''.join(self._container)

    @content.setter
    def content(self, value):
        # Consume iterators upon assignment to allow repeated iteration.
        if hasattr(value, '__iter__') and not isinstance(value, (bytes, str)):
            content = b''.join(self.make_bytes(chunk) for chunk in value)
            if hasattr(value, 'close'):
                try:
                    value.close()
                except Exception:
                    pass
        else:
            content = self.make_bytes(value)
        # Create a list of properly encoded bytestrings to support write().
self._container = [content] def __iter__(self): return iter(self._container) def write(self, content): self._container.append(self.make_bytes(content)) def tell(self): return len(self.content) def getvalue(self): return self.content def writable(self): return True def writelines(self, lines): for line in lines: self.write(line) class StreamingHttpResponse(HttpResponseBase): """ A streaming HTTP response class with an iterator as content. This should only be iterated once, when the response is streamed to the client. However, it can be appended to or replaced with a new iterator that wraps the original content (or yields entirely new content). """ streaming = True def __init__(self, streaming_content=(), *args, **kwargs): super().__init__(*args, **kwargs) # `streaming_content` should be an iterable of bytestrings. # See the `streaming_content` property methods. self.streaming_content = streaming_content @property def content(self): raise AttributeError( "This %s instance has no `content` attribute. Use " "`streaming_content` instead." % self.__class__.__name__ ) @property def streaming_content(self): return map(self.make_bytes, self._iterator) @streaming_content.setter def streaming_content(self, value): self._set_streaming_content(value) def _set_streaming_content(self, value): # Ensure we can never iterate on "value" more than once. self._iterator = iter(value) if hasattr(value, 'close'): self._resource_closers.append(value.close) def __iter__(self): return self.streaming_content def getvalue(self): return b''.join(self.streaming_content) class FileResponse(StreamingHttpResponse): """ A streaming HTTP response class optimized for files. """ block_size = 4096 def __init__(self, *args, as_attachment=False, filename='', **kwargs): self.as_attachment = as_attachment self.filename = filename super().__init__(*args, **kwargs) def _set_streaming_content(self, value): if not hasattr(value, 'read'): self.file_to_stream = None return super()._set_streaming_content(value) self.file_to_stream = filelike = value if hasattr(filelike, 'close'): self._resource_closers.append(filelike.close) value = iter(lambda: filelike.read(self.block_size), b'') self.set_headers(filelike) super()._set_streaming_content(value) def set_headers(self, filelike): """ Set some common response headers (Content-Length, Content-Type, and Content-Disposition) based on the `filelike` response content. """ encoding_map = { 'bzip2': 'application/x-bzip', 'gzip': 'application/gzip', 'xz': 'application/x-xz', } filename = getattr(filelike, 'name', None) filename = filename if (isinstance(filename, str) and filename) else self.filename if os.path.isabs(filename): self['Content-Length'] = os.path.getsize(filelike.name) elif hasattr(filelike, 'getbuffer'): self['Content-Length'] = filelike.getbuffer().nbytes if self.get('Content-Type', '').startswith('text/html'): if filename: content_type, encoding = mimetypes.guess_type(filename) # Encoding isn't set to prevent browsers from automatically # uncompressing files. 
content_type = encoding_map.get(encoding, content_type) self['Content-Type'] = content_type or 'application/octet-stream' else: self['Content-Type'] = 'application/octet-stream' filename = self.filename or os.path.basename(filename) if filename: disposition = 'attachment' if self.as_attachment else 'inline' try: filename.encode('ascii') file_expr = 'filename="{}"'.format(filename) except UnicodeEncodeError: file_expr = "filename*=utf-8''{}".format(quote(filename)) self['Content-Disposition'] = '{}; {}'.format(disposition, file_expr) elif self.as_attachment: self['Content-Disposition'] = 'attachment' class HttpResponseRedirectBase(HttpResponse): allowed_schemes = ['http', 'https', 'ftp'] def __init__(self, redirect_to, *args, **kwargs): super().__init__(*args, **kwargs) self['Location'] = iri_to_uri(redirect_to) parsed = urlparse(str(redirect_to)) if parsed.scheme and parsed.scheme not in self.allowed_schemes: raise DisallowedRedirect("Unsafe redirect to URL with protocol '%s'" % parsed.scheme) url = property(lambda self: self['Location']) def __repr__(self): return '<%(cls)s status_code=%(status_code)d%(content_type)s, url="%(url)s">' % { 'cls': self.__class__.__name__, 'status_code': self.status_code, 'content_type': self._content_type_for_repr, 'url': self.url, } class HttpResponseRedirect(HttpResponseRedirectBase): status_code = 302 class HttpResponsePermanentRedirect(HttpResponseRedirectBase): status_code = 301 class HttpResponseNotModified(HttpResponse): status_code = 304 def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) del self['content-type'] @HttpResponse.content.setter def content(self, value): if value: raise AttributeError("You cannot set content to a 304 (Not Modified) response") self._container = [] class HttpResponseBadRequest(HttpResponse): status_code = 400 class HttpResponseNotFound(HttpResponse): status_code = 404 class HttpResponseForbidden(HttpResponse): status_code = 403 class HttpResponseNotAllowed(HttpResponse): status_code = 405 def __init__(self, permitted_methods, *args, **kwargs): super().__init__(*args, **kwargs) self['Allow'] = ', '.join(permitted_methods) def __repr__(self): return '<%(cls)s [%(methods)s] status_code=%(status_code)d%(content_type)s>' % { 'cls': self.__class__.__name__, 'status_code': self.status_code, 'content_type': self._content_type_for_repr, 'methods': self['Allow'], } class HttpResponseGone(HttpResponse): status_code = 410 class HttpResponseServerError(HttpResponse): status_code = 500 class Http404(Exception): pass class JsonResponse(HttpResponse): """ An HTTP response class that consumes data to be serialized to JSON. :param data: Data to be dumped into json. By default only ``dict`` objects are allowed to be passed due to a security flaw before EcmaScript 5. See the ``safe`` parameter for more information. :param encoder: Should be a json encoder class. Defaults to ``django.core.serializers.json.DjangoJSONEncoder``. :param safe: Controls if only ``dict`` objects may be serialized. Defaults to ``True``. :param json_dumps_params: A dictionary of kwargs passed to json.dumps(). """ def __init__(self, data, encoder=DjangoJSONEncoder, safe=True, json_dumps_params=None, **kwargs): if safe and not isinstance(data, dict): raise TypeError( 'In order to allow non-dict objects to be serialized set the ' 'safe parameter to False.' 
) if json_dumps_params is None: json_dumps_params = {} kwargs.setdefault('content_type', 'application/json') data = json.dumps(data, cls=encoder, **json_dumps_params) super().__init__(content=data, **kwargs)
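
# Usage sketch (appended for illustration; not part of the original module):
# JsonResponse dumps the payload with DjangoJSONEncoder and sets the
# Content-Type header. settings.configure() is only here so the example can
# run outside a project.
if __name__ == '__main__':
    from django.conf import settings
    if not settings.configured:
        settings.configure()

    resp = JsonResponse({'status': 'ok', 'count': 3})
    print(resp['Content-Type'])  # application/json
    print(resp.content)          # b'{"status": "ok", "count": 3}'

    JsonResponse([1, 2, 3], safe=False)  # lists need safe=False
    try:
        JsonResponse([1, 2, 3])
    except TypeError as exc:
        print(exc)  # explains that safe=False is required for non-dict data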
eb5f89b7b98097f751a672b9aa4da59622ebafc59d7c76760d14e86a90958e8f
import cgi import codecs import copy import warnings from io import BytesIO from itertools import chain from urllib.parse import quote, urlencode, urljoin, urlsplit from django.conf import settings from django.core import signing from django.core.exceptions import ( DisallowedHost, ImproperlyConfigured, RequestDataTooBig, ) from django.core.files import uploadhandler from django.http.multipartparser import MultiPartParser, MultiPartParserError from django.utils.datastructures import ( CaseInsensitiveMapping, ImmutableList, MultiValueDict, ) from django.utils.deprecation import RemovedInDjango40Warning from django.utils.encoding import escape_uri_path, iri_to_uri from django.utils.functional import cached_property from django.utils.http import is_same_domain, limited_parse_qsl from django.utils.regex_helper import _lazy_re_compile from .multipartparser import parse_header RAISE_ERROR = object() host_validation_re = _lazy_re_compile(r"^([a-z0-9.-]+|\[[a-f0-9]*:[a-f0-9\.:]+\])(:\d+)?$") class UnreadablePostError(OSError): pass class RawPostDataException(Exception): """ You cannot access raw_post_data from a request that has multipart/* POST data if it has been accessed via POST, FILES, etc.. """ pass class HttpRequest: """A basic HTTP request.""" # The encoding used in GET/POST dicts. None means use default setting. _encoding = None _upload_handlers = [] def __init__(self): # WARNING: The `WSGIRequest` subclass doesn't call `super`. # Any variable assignment made here should also happen in # `WSGIRequest.__init__()`. self.GET = QueryDict(mutable=True) self.POST = QueryDict(mutable=True) self.COOKIES = {} self.META = {} self.FILES = MultiValueDict() self.path = '' self.path_info = '' self.method = None self.resolver_match = None self.content_type = None self.content_params = None def __repr__(self): if self.method is None or not self.get_full_path(): return '<%s>' % self.__class__.__name__ return '<%s: %s %r>' % (self.__class__.__name__, self.method, self.get_full_path()) @cached_property def headers(self): return HttpHeaders(self.META) @cached_property def accepted_types(self): """Return a list of MediaType instances.""" return parse_accept_header(self.headers.get('Accept', '*/*')) def accepts(self, media_type): return any( accepted_type.match(media_type) for accepted_type in self.accepted_types ) def _set_content_type_params(self, meta): """Set content_type, content_params, and encoding.""" self.content_type, self.content_params = cgi.parse_header(meta.get('CONTENT_TYPE', '')) if 'charset' in self.content_params: try: codecs.lookup(self.content_params['charset']) except LookupError: pass else: self.encoding = self.content_params['charset'] def _get_raw_host(self): """ Return the HTTP host using the environment or request headers. Skip allowed hosts protection, so may return an insecure host. """ # We try three options, in order of decreasing preference. if settings.USE_X_FORWARDED_HOST and ( 'HTTP_X_FORWARDED_HOST' in self.META): host = self.META['HTTP_X_FORWARDED_HOST'] elif 'HTTP_HOST' in self.META: host = self.META['HTTP_HOST'] else: # Reconstruct the host using the algorithm from PEP 333. host = self.META['SERVER_NAME'] server_port = self.get_port() if server_port != ('443' if self.is_secure() else '80'): host = '%s:%s' % (host, server_port) return host def get_host(self): """Return the HTTP host using the environment or request headers.""" host = self._get_raw_host() # Allow variants of localhost if ALLOWED_HOSTS is empty and DEBUG=True. 
allowed_hosts = settings.ALLOWED_HOSTS if settings.DEBUG and not allowed_hosts: allowed_hosts = ['.localhost', '127.0.0.1', '[::1]'] domain, port = split_domain_port(host) if domain and validate_host(domain, allowed_hosts): return host else: msg = "Invalid HTTP_HOST header: %r." % host if domain: msg += " You may need to add %r to ALLOWED_HOSTS." % domain else: msg += " The domain name provided is not valid according to RFC 1034/1035." raise DisallowedHost(msg) def get_port(self): """Return the port number for the request as a string.""" if settings.USE_X_FORWARDED_PORT and 'HTTP_X_FORWARDED_PORT' in self.META: port = self.META['HTTP_X_FORWARDED_PORT'] else: port = self.META['SERVER_PORT'] return str(port) def get_full_path(self, force_append_slash=False): return self._get_full_path(self.path, force_append_slash) def get_full_path_info(self, force_append_slash=False): return self._get_full_path(self.path_info, force_append_slash) def _get_full_path(self, path, force_append_slash): # RFC 3986 requires query string arguments to be in the ASCII range. # Rather than crash if this doesn't happen, we encode defensively. return '%s%s%s' % ( escape_uri_path(path), '/' if force_append_slash and not path.endswith('/') else '', ('?' + iri_to_uri(self.META.get('QUERY_STRING', ''))) if self.META.get('QUERY_STRING', '') else '' ) def get_signed_cookie(self, key, default=RAISE_ERROR, salt='', max_age=None): """ Attempt to return a signed cookie. If the signature fails or the cookie has expired, raise an exception, unless the `default` argument is provided, in which case return that value. """ try: cookie_value = self.COOKIES[key] except KeyError: if default is not RAISE_ERROR: return default else: raise try: value = signing.get_cookie_signer(salt=key + salt).unsign( cookie_value, max_age=max_age) except signing.BadSignature: if default is not RAISE_ERROR: return default else: raise return value def get_raw_uri(self): """ Return an absolute URI from variables available in this request. Skip allowed hosts protection, so may return insecure URI. """ return '{scheme}://{host}{path}'.format( scheme=self.scheme, host=self._get_raw_host(), path=self.get_full_path(), ) def build_absolute_uri(self, location=None): """ Build an absolute URI from the location and the variables available in this request. If no ``location`` is specified, build the absolute URI using request.get_full_path(). If the location is absolute, convert it to an RFC 3987 compliant URI and return it. If location is relative or is scheme-relative (i.e., ``//example.com/``), urljoin() it to a base URL constructed from the request variables. """ if location is None: # Make it an absolute url (but schemeless and domainless) for the # edge case that the path starts with '//'. location = '//%s' % self.get_full_path() else: # Coerce lazy locations. location = str(location) bits = urlsplit(location) if not (bits.scheme and bits.netloc): # Handle the simple, most common case. If the location is absolute # and a scheme or host (netloc) isn't provided, skip an expensive # urljoin() as long as no path segments are '.' or '..'. if (bits.path.startswith('/') and not bits.scheme and not bits.netloc and '/./' not in bits.path and '/../' not in bits.path): # If location starts with '//' but has no netloc, reuse the # schema and netloc from the current request. Strip the double # slashes and continue as if it wasn't specified. 
if location.startswith('//'): location = location[2:] location = self._current_scheme_host + location else: # Join the constructed URL with the provided location, which # allows the provided location to apply query strings to the # base path. location = urljoin(self._current_scheme_host + self.path, location) return iri_to_uri(location) @cached_property def _current_scheme_host(self): return '{}://{}'.format(self.scheme, self.get_host()) def _get_scheme(self): """ Hook for subclasses like WSGIRequest to implement. Return 'http' by default. """ return 'http' @property def scheme(self): if settings.SECURE_PROXY_SSL_HEADER: try: header, secure_value = settings.SECURE_PROXY_SSL_HEADER except ValueError: raise ImproperlyConfigured( 'The SECURE_PROXY_SSL_HEADER setting must be a tuple containing two values.' ) header_value = self.META.get(header) if header_value is not None: return 'https' if header_value == secure_value else 'http' return self._get_scheme() def is_secure(self): return self.scheme == 'https' def is_ajax(self): warnings.warn( 'request.is_ajax() is deprecated. See Django 3.1 release notes ' 'for more details about this deprecation.', RemovedInDjango40Warning, ) return self.META.get('HTTP_X_REQUESTED_WITH') == 'XMLHttpRequest' @property def encoding(self): return self._encoding @encoding.setter def encoding(self, val): """ Set the encoding used for GET/POST accesses. If the GET or POST dictionary has already been created, remove and recreate it on the next access (so that it is decoded correctly). """ self._encoding = val if hasattr(self, 'GET'): del self.GET if hasattr(self, '_post'): del self._post def _initialize_handlers(self): self._upload_handlers = [uploadhandler.load_handler(handler, self) for handler in settings.FILE_UPLOAD_HANDLERS] @property def upload_handlers(self): if not self._upload_handlers: # If there are no upload handlers defined, initialize them from settings. self._initialize_handlers() return self._upload_handlers @upload_handlers.setter def upload_handlers(self, upload_handlers): if hasattr(self, '_files'): raise AttributeError("You cannot set the upload handlers after the upload has been processed.") self._upload_handlers = upload_handlers def parse_file_upload(self, META, post_data): """Return a tuple of (POST QueryDict, FILES MultiValueDict).""" self.upload_handlers = ImmutableList( self.upload_handlers, warning="You cannot alter upload handlers after the upload has been processed." ) parser = MultiPartParser(META, post_data, self.upload_handlers, self.encoding) return parser.parse() @property def body(self): if not hasattr(self, '_body'): if self._read_started: raise RawPostDataException("You cannot access body after reading from request's data stream") # Limit the maximum request data size that will be handled in-memory. 
if (settings.DATA_UPLOAD_MAX_MEMORY_SIZE is not None and int(self.META.get('CONTENT_LENGTH') or 0) > settings.DATA_UPLOAD_MAX_MEMORY_SIZE): raise RequestDataTooBig('Request body exceeded settings.DATA_UPLOAD_MAX_MEMORY_SIZE.') try: self._body = self.read() except OSError as e: raise UnreadablePostError(*e.args) from e self._stream = BytesIO(self._body) return self._body def _mark_post_parse_error(self): self._post = QueryDict() self._files = MultiValueDict() def _load_post_and_files(self): """Populate self._post and self._files if the content-type is a form type""" if self.method != 'POST': self._post, self._files = QueryDict(encoding=self._encoding), MultiValueDict() return if self._read_started and not hasattr(self, '_body'): self._mark_post_parse_error() return if self.content_type == 'multipart/form-data': if hasattr(self, '_body'): # Use already read data data = BytesIO(self._body) else: data = self try: self._post, self._files = self.parse_file_upload(self.META, data) except MultiPartParserError: # An error occurred while parsing POST data. Since when # formatting the error the request handler might access # self.POST, set self._post and self._file to prevent # attempts to parse POST data again. self._mark_post_parse_error() raise elif self.content_type == 'application/x-www-form-urlencoded': self._post, self._files = QueryDict(self.body, encoding=self._encoding), MultiValueDict() else: self._post, self._files = QueryDict(encoding=self._encoding), MultiValueDict() def close(self): if hasattr(self, '_files'): for f in chain.from_iterable(l[1] for l in self._files.lists()): f.close() # File-like and iterator interface. # # Expects self._stream to be set to an appropriate source of bytes by # a corresponding request subclass (e.g. WSGIRequest). # Also when request data has already been read by request.POST or # request.body, self._stream points to a BytesIO instance # containing that data. def read(self, *args, **kwargs): self._read_started = True try: return self._stream.read(*args, **kwargs) except OSError as e: raise UnreadablePostError(*e.args) from e def readline(self, *args, **kwargs): self._read_started = True try: return self._stream.readline(*args, **kwargs) except OSError as e: raise UnreadablePostError(*e.args) from e def __iter__(self): return iter(self.readline, b'') def readlines(self): return list(self) class HttpHeaders(CaseInsensitiveMapping): HTTP_PREFIX = 'HTTP_' # PEP 333 gives two headers which aren't prepended with HTTP_. UNPREFIXED_HEADERS = {'CONTENT_TYPE', 'CONTENT_LENGTH'} def __init__(self, environ): headers = {} for header, value in environ.items(): name = self.parse_header_name(header) if name: headers[name] = value super().__init__(headers) def __getitem__(self, key): """Allow header lookup using underscores in place of hyphens.""" return super().__getitem__(key.replace('_', '-')) @classmethod def parse_header_name(cls, header): if header.startswith(cls.HTTP_PREFIX): header = header[len(cls.HTTP_PREFIX):] elif header not in cls.UNPREFIXED_HEADERS: return None return header.replace('_', '-').title() class QueryDict(MultiValueDict): """ A specialized MultiValueDict which represents a query string. A QueryDict can be used to represent GET or POST data. It subclasses MultiValueDict since keys in such data can be repeated, for instance in the data from a form with a <select multiple> field. By default QueryDicts are immutable, though the copy() method will always return a mutable copy. 
    Both keys and values set on this class are converted from the given
    encoding (DEFAULT_CHARSET by default) to str.
    """

    # These are both reset in __init__, but are specified here at the class
    # level so that unpickling will have valid values
    _mutable = True
    _encoding = None

    def __init__(self, query_string=None, mutable=False, encoding=None):
        super().__init__()
        self.encoding = encoding or settings.DEFAULT_CHARSET
        query_string = query_string or ''
        parse_qsl_kwargs = {
            'keep_blank_values': True,
            'fields_limit': settings.DATA_UPLOAD_MAX_NUMBER_FIELDS,
            'encoding': self.encoding,
        }
        if isinstance(query_string, bytes):
            # query_string normally contains URL-encoded data, a subset of ASCII.
            try:
                query_string = query_string.decode(self.encoding)
            except UnicodeDecodeError:
                # ... but some user agents are misbehaving :-(
                query_string = query_string.decode('iso-8859-1')
        for key, value in limited_parse_qsl(query_string, **parse_qsl_kwargs):
            self.appendlist(key, value)
        self._mutable = mutable

    @classmethod
    def fromkeys(cls, iterable, value='', mutable=False, encoding=None):
        """
        Return a new QueryDict with keys (may be repeated) from an iterable and
        values from value.
        """
        q = cls('', mutable=True, encoding=encoding)
        for key in iterable:
            q.appendlist(key, value)
        if not mutable:
            q._mutable = False
        return q

    @property
    def encoding(self):
        if self._encoding is None:
            self._encoding = settings.DEFAULT_CHARSET
        return self._encoding

    @encoding.setter
    def encoding(self, value):
        self._encoding = value

    def _assert_mutable(self):
        if not self._mutable:
            raise AttributeError("This QueryDict instance is immutable")

    def __setitem__(self, key, value):
        self._assert_mutable()
        key = bytes_to_text(key, self.encoding)
        value = bytes_to_text(value, self.encoding)
        super().__setitem__(key, value)

    def __delitem__(self, key):
        self._assert_mutable()
        super().__delitem__(key)

    def __copy__(self):
        result = self.__class__('', mutable=True, encoding=self.encoding)
        for key, value in self.lists():
            result.setlist(key, value)
        return result

    def __deepcopy__(self, memo):
        result = self.__class__('', mutable=True, encoding=self.encoding)
        memo[id(self)] = result
        for key, value in self.lists():
            result.setlist(copy.deepcopy(key, memo), copy.deepcopy(value, memo))
        return result

    def setlist(self, key, list_):
        self._assert_mutable()
        key = bytes_to_text(key, self.encoding)
        list_ = [bytes_to_text(elt, self.encoding) for elt in list_]
        super().setlist(key, list_)

    def setlistdefault(self, key, default_list=None):
        self._assert_mutable()
        return super().setlistdefault(key, default_list)

    def appendlist(self, key, value):
        self._assert_mutable()
        key = bytes_to_text(key, self.encoding)
        value = bytes_to_text(value, self.encoding)
        super().appendlist(key, value)

    def pop(self, key, *args):
        self._assert_mutable()
        return super().pop(key, *args)

    def popitem(self):
        self._assert_mutable()
        return super().popitem()

    def clear(self):
        self._assert_mutable()
        super().clear()

    def setdefault(self, key, default=None):
        self._assert_mutable()
        key = bytes_to_text(key, self.encoding)
        default = bytes_to_text(default, self.encoding)
        return super().setdefault(key, default)

    def copy(self):
        """Return a mutable copy of this object."""
        return self.__deepcopy__({})

    def urlencode(self, safe=None):
        """
        Return an encoded string of all query string arguments.
`safe` specifies characters which don't require quoting, for example:: >>> q = QueryDict(mutable=True) >>> q['next'] = '/a&b/' >>> q.urlencode() 'next=%2Fa%26b%2F' >>> q.urlencode(safe='/') 'next=/a%26b/' """ output = [] if safe: safe = safe.encode(self.encoding) def encode(k, v): return '%s=%s' % ((quote(k, safe), quote(v, safe))) else: def encode(k, v): return urlencode({k: v}) for k, list_ in self.lists(): output.extend( encode(k.encode(self.encoding), str(v).encode(self.encoding)) for v in list_ ) return '&'.join(output) class MediaType: def __init__(self, media_type_raw_line): full_type, self.params = parse_header( media_type_raw_line.encode('ascii') if media_type_raw_line else b'' ) self.main_type, _, self.sub_type = full_type.partition('/') def __str__(self): params_str = ''.join( '; %s=%s' % (k, v.decode('ascii')) for k, v in self.params.items() ) return '%s%s%s' % ( self.main_type, ('/%s' % self.sub_type) if self.sub_type else '', params_str, ) def __repr__(self): return '<%s: %s>' % (self.__class__.__qualname__, self) @property def is_all_types(self): return self.main_type == '*' and self.sub_type == '*' def match(self, other): if self.is_all_types: return True other = MediaType(other) if self.main_type == other.main_type and self.sub_type in {'*', other.sub_type}: return True return False # It's neither necessary nor appropriate to use # django.utils.encoding.force_str() for parsing URLs and form inputs. Thus, # this slightly more restricted function, used by QueryDict. def bytes_to_text(s, encoding): """ Convert bytes objects to strings, using the given encoding. Illegally encoded input characters are replaced with Unicode "unknown" codepoint (\ufffd). Return any non-bytes objects without change. """ if isinstance(s, bytes): return str(s, encoding, 'replace') else: return s def split_domain_port(host): """ Return a (domain, port) tuple from a given host. Returned domain is lowercased. If the host is invalid, the domain will be empty. """ host = host.lower() if not host_validation_re.match(host): return '', '' if host[-1] == ']': # It's an IPv6 address without a port. return host, '' bits = host.rsplit(':', 1) domain, port = bits if len(bits) == 2 else (bits[0], '') # Remove a trailing dot (if present) from the domain. domain = domain[:-1] if domain.endswith('.') else domain return domain, port def validate_host(host, allowed_hosts): """ Validate the given host for this site. Check that the host looks valid and matches a host or host pattern in the given list of ``allowed_hosts``. Any pattern beginning with a period matches a domain and all its subdomains (e.g. ``.example.com`` matches ``example.com`` and any subdomain), ``*`` matches anything, and anything else must match exactly. Note: This function assumes that the given host is lowercased and has already had the port, if any, stripped off. Return ``True`` for a valid host, ``False`` otherwise. """ return any(pattern == '*' or is_same_domain(host, pattern) for pattern in allowed_hosts) def parse_accept_header(header): return [MediaType(token) for token in header.split(',') if token.strip()]
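
# Usage sketch (appended for illustration; not part of the original module):
# QueryDict keeps repeated keys, is immutable by default, and copy() always
# returns a mutable copy. settings.configure() just lets the example run
# outside a project.
if __name__ == '__main__':
    from django.conf import settings
    if not settings.configured:
        settings.configure()

    q = QueryDict('a=1&a=2&next=/a%26b/')
    print(q.getlist('a'))  # ['1', '2']
    print(q['next'])       # '/a&b/'
    try:
        q['a'] = '3'
    except AttributeError as exc:
        print(exc)         # This QueryDict instance is immutable

    q2 = q.copy()          # copies are always mutable
    q2['a'] = '3'
    print(q2.urlencode(safe='/'))  # a=3&next=/a%26b/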
6c13d73b1effc99a20910c29ffcd8092302d9f8f648088c9bf07037e65d65fda
from functools import wraps from django.middleware.cache import CacheMiddleware from django.utils.cache import add_never_cache_headers, patch_cache_control from django.utils.decorators import decorator_from_middleware_with_args def cache_page(timeout, *, cache=None, key_prefix=None): """ Decorator for views that tries getting the page from the cache and populates the cache if the page isn't in the cache yet. The cache is keyed by the URL and some data from the headers. Additionally there is the key prefix that is used to distinguish different cache areas in a multi-site setup. You could use the get_current_site().domain, for example, as that is unique across a Django project. Additionally, all headers from the response's Vary header will be taken into account on caching -- just like the middleware does. """ return decorator_from_middleware_with_args(CacheMiddleware)( page_timeout=timeout, cache_alias=cache, key_prefix=key_prefix, ) def cache_control(**kwargs): def _cache_controller(viewfunc): @wraps(viewfunc) def _cache_controlled(request, *args, **kw): response = viewfunc(request, *args, **kw) patch_cache_control(response, **kwargs) return response return _cache_controlled return _cache_controller def never_cache(view_func): """ Decorator that adds headers to a response so that it will never be cached. """ @wraps(view_func) def _wrapped_view_func(request, *args, **kwargs): response = view_func(request, *args, **kwargs) add_never_cache_headers(response) return response return _wrapped_view_func
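
# Usage sketch (appended for illustration; the view names are invented):
# cache_control() patches the Cache-Control header on the response, and
# never_cache() marks a response as uncacheable. cache_page() additionally
# needs the cache middleware machinery, so it is shown only as a pattern:
#
#     @cache_page(60 * 15, key_prefix='site1')
#     def cached_view(request): ...
if __name__ == '__main__':
    from django.conf import settings
    if not settings.configured:
        settings.configure()
    from django.http import HttpResponse
    from django.test import RequestFactory

    @cache_control(max_age=300, public=True)
    def my_view(request):
        return HttpResponse('hello')

    request = RequestFactory().get('/')
    print(my_view(request)['Cache-Control'])  # e.g. max-age=300, public

    @never_cache
    def dashboard(request):
        return HttpResponse('fresh')

    # Typically something like: max-age=0, no-cache, no-store,
    # must-revalidate (plus private on recent Django versions).
    print(dashboard(request)['Cache-Control'])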
255e4590fa10bb233c06b0c6ae65c05e7b3290c6a591bcdee645415abd04084b
import logging import warnings from functools import update_wrapper from django.core.exceptions import ImproperlyConfigured from django.http import ( HttpResponse, HttpResponseGone, HttpResponseNotAllowed, HttpResponsePermanentRedirect, HttpResponseRedirect, ) from django.template.response import TemplateResponse from django.urls import reverse from django.utils.decorators import classonlymethod from django.utils.deprecation import RemovedInDjango40Warning from django.utils.functional import SimpleLazyObject logger = logging.getLogger('django.request') class ContextMixin: """ A default context mixin that passes the keyword arguments received by get_context_data() as the template context. """ extra_context = None def get_context_data(self, **kwargs): kwargs.setdefault('view', self) if self.extra_context is not None: kwargs.update(self.extra_context) return kwargs class View: """ Intentionally simple parent class for all views. Only implements dispatch-by-method and simple sanity checking. """ http_method_names = ['get', 'post', 'put', 'patch', 'delete', 'head', 'options', 'trace'] def __init__(self, **kwargs): """ Constructor. Called in the URLconf; can contain helpful extra keyword arguments, and other things. """ # Go through keyword arguments, and either save their values to our # instance, or raise an error. for key, value in kwargs.items(): setattr(self, key, value) @classonlymethod def as_view(cls, **initkwargs): """Main entry point for a request-response process.""" for key in initkwargs: if key in cls.http_method_names: raise TypeError( 'The method name %s is not accepted as a keyword argument ' 'to %s().' % (key, cls.__name__) ) if not hasattr(cls, key): raise TypeError("%s() received an invalid keyword %r. as_view " "only accepts arguments that are already " "attributes of the class." % (cls.__name__, key)) def view(request, *args, **kwargs): self = cls(**initkwargs) self.setup(request, *args, **kwargs) if not hasattr(self, 'request'): raise AttributeError( "%s instance has no 'request' attribute. Did you override " "setup() and forget to call super()?" % cls.__name__ ) return self.dispatch(request, *args, **kwargs) view.view_class = cls view.view_initkwargs = initkwargs # take name and docstring from class update_wrapper(view, cls, updated=()) # and possible attributes set by decorators # like csrf_exempt from dispatch update_wrapper(view, cls.dispatch, assigned=()) return view def setup(self, request, *args, **kwargs): """Initialize attributes shared by all view methods.""" if hasattr(self, 'get') and not hasattr(self, 'head'): self.head = self.get self.request = request self.args = args self.kwargs = kwargs def dispatch(self, request, *args, **kwargs): # Try to dispatch to the right method; if a method doesn't exist, # defer to the error handler. Also defer to the error handler if the # request method isn't on the approved list. 
if request.method.lower() in self.http_method_names: handler = getattr(self, request.method.lower(), self.http_method_not_allowed) else: handler = self.http_method_not_allowed return handler(request, *args, **kwargs) def http_method_not_allowed(self, request, *args, **kwargs): logger.warning( 'Method Not Allowed (%s): %s', request.method, request.path, extra={'status_code': 405, 'request': request} ) return HttpResponseNotAllowed(self._allowed_methods()) def options(self, request, *args, **kwargs): """Handle responding to requests for the OPTIONS HTTP verb.""" response = HttpResponse() response['Allow'] = ', '.join(self._allowed_methods()) response['Content-Length'] = '0' return response def _allowed_methods(self): return [m.upper() for m in self.http_method_names if hasattr(self, m)] class TemplateResponseMixin: """A mixin that can be used to render a template.""" template_name = None template_engine = None response_class = TemplateResponse content_type = None def render_to_response(self, context, **response_kwargs): """ Return a response, using the `response_class` for this view, with a template rendered with the given context. Pass response_kwargs to the constructor of the response class. """ response_kwargs.setdefault('content_type', self.content_type) return self.response_class( request=self.request, template=self.get_template_names(), context=context, using=self.template_engine, **response_kwargs ) def get_template_names(self): """ Return a list of template names to be used for the request. Must return a list. May not be called if render_to_response() is overridden. """ if self.template_name is None: raise ImproperlyConfigured( "TemplateResponseMixin requires either a definition of " "'template_name' or an implementation of 'get_template_names()'") else: return [self.template_name] class TemplateView(TemplateResponseMixin, ContextMixin, View): """Render a template.""" def get(self, request, *args, **kwargs): # RemovedInDjango40Warning: when the deprecation ends, replace with: # context = self.get_context_data() context_kwargs = _wrap_url_kwargs_with_deprecation_warning(kwargs) context = self.get_context_data(**context_kwargs) return self.render_to_response(context) # RemovedInDjango40Warning def _wrap_url_kwargs_with_deprecation_warning(url_kwargs): context_kwargs = {} for key, value in url_kwargs.items(): # Bind into function closure. @SimpleLazyObject def access_value(key=key, value=value): warnings.warn( 'TemplateView passing URL kwargs to the context is ' 'deprecated. Reference %s in your template through ' 'view.kwargs instead.' % key, RemovedInDjango40Warning, stacklevel=2, ) return value context_kwargs[key] = access_value return context_kwargs class RedirectView(View): """Provide a redirect on any GET request.""" permanent = False url = None pattern_name = None query_string = False def get_redirect_url(self, *args, **kwargs): """ Return the URL redirect to. Keyword arguments from the URL pattern match generating the redirect request are provided as kwargs to this method. 
""" if self.url: url = self.url % kwargs elif self.pattern_name: url = reverse(self.pattern_name, args=args, kwargs=kwargs) else: return None args = self.request.META.get('QUERY_STRING', '') if args and self.query_string: url = "%s?%s" % (url, args) return url def get(self, request, *args, **kwargs): url = self.get_redirect_url(*args, **kwargs) if url: if self.permanent: return HttpResponsePermanentRedirect(url) else: return HttpResponseRedirect(url) else: logger.warning( 'Gone: %s', request.path, extra={'status_code': 410, 'request': request} ) return HttpResponseGone() def head(self, request, *args, **kwargs): return self.get(request, *args, **kwargs) def post(self, request, *args, **kwargs): return self.get(request, *args, **kwargs) def options(self, request, *args, **kwargs): return self.get(request, *args, **kwargs) def delete(self, request, *args, **kwargs): return self.get(request, *args, **kwargs) def put(self, request, *args, **kwargs): return self.get(request, *args, **kwargs) def patch(self, request, *args, **kwargs): return self.get(request, *args, **kwargs)
c210daa4759073ae4bfa3b2958331e833186a3aa49ff8a58b03486436bee9308
from django.core.exceptions import ImproperlyConfigured from django.core.paginator import InvalidPage, Paginator from django.db.models import QuerySet from django.http import Http404 from django.utils.translation import gettext as _ from django.views.generic.base import ContextMixin, TemplateResponseMixin, View class MultipleObjectMixin(ContextMixin): """A mixin for views manipulating multiple objects.""" allow_empty = True queryset = None model = None paginate_by = None paginate_orphans = 0 context_object_name = None paginator_class = Paginator page_kwarg = 'page' ordering = None def get_queryset(self): """ Return the list of items for this view. The return value must be an iterable and may be an instance of `QuerySet` in which case `QuerySet` specific behavior will be enabled. """ if self.queryset is not None: queryset = self.queryset if isinstance(queryset, QuerySet): queryset = queryset.all() elif self.model is not None: queryset = self.model._default_manager.all() else: raise ImproperlyConfigured( "%(cls)s is missing a QuerySet. Define " "%(cls)s.model, %(cls)s.queryset, or override " "%(cls)s.get_queryset()." % { 'cls': self.__class__.__name__ } ) ordering = self.get_ordering() if ordering: if isinstance(ordering, str): ordering = (ordering,) queryset = queryset.order_by(*ordering) return queryset def get_ordering(self): """Return the field or fields to use for ordering the queryset.""" return self.ordering def paginate_queryset(self, queryset, page_size): """Paginate the queryset, if needed.""" paginator = self.get_paginator( queryset, page_size, orphans=self.get_paginate_orphans(), allow_empty_first_page=self.get_allow_empty()) page_kwarg = self.page_kwarg page = self.kwargs.get(page_kwarg) or self.request.GET.get(page_kwarg) or 1 try: page_number = int(page) except ValueError: if page == 'last': page_number = paginator.num_pages else: raise Http404(_('Page is not “last”, nor can it be converted to an int.')) try: page = paginator.page(page_number) return (paginator, page, page.object_list, page.has_other_pages()) except InvalidPage as e: raise Http404(_('Invalid page (%(page_number)s): %(message)s') % { 'page_number': page_number, 'message': str(e) }) def get_paginate_by(self, queryset): """ Get the number of items to paginate by, or ``None`` for no pagination. """ return self.paginate_by def get_paginator(self, queryset, per_page, orphans=0, allow_empty_first_page=True, **kwargs): """Return an instance of the paginator for this view.""" return self.paginator_class( queryset, per_page, orphans=orphans, allow_empty_first_page=allow_empty_first_page, **kwargs) def get_paginate_orphans(self): """ Return the maximum number of orphans extend the last page by when paginating. """ return self.paginate_orphans def get_allow_empty(self): """ Return ``True`` if the view should display empty lists and ``False`` if a 404 should be raised instead. 
""" return self.allow_empty def get_context_object_name(self, object_list): """Get the name of the item to be used in the context.""" if self.context_object_name: return self.context_object_name elif hasattr(object_list, 'model'): return '%s_list' % object_list.model._meta.model_name else: return None def get_context_data(self, *, object_list=None, **kwargs): """Get the context for this view.""" queryset = object_list if object_list is not None else self.object_list page_size = self.get_paginate_by(queryset) context_object_name = self.get_context_object_name(queryset) if page_size: paginator, page, queryset, is_paginated = self.paginate_queryset(queryset, page_size) context = { 'paginator': paginator, 'page_obj': page, 'is_paginated': is_paginated, 'object_list': queryset } else: context = { 'paginator': None, 'page_obj': None, 'is_paginated': False, 'object_list': queryset } if context_object_name is not None: context[context_object_name] = queryset context.update(kwargs) return super().get_context_data(**context) class BaseListView(MultipleObjectMixin, View): """A base view for displaying a list of objects.""" def get(self, request, *args, **kwargs): self.object_list = self.get_queryset() allow_empty = self.get_allow_empty() if not allow_empty: # When pagination is enabled and object_list is a queryset, # it's better to do a cheap query than to load the unpaginated # queryset in memory. if self.get_paginate_by(self.object_list) is not None and hasattr(self.object_list, 'exists'): is_empty = not self.object_list.exists() else: is_empty = not self.object_list if is_empty: raise Http404(_('Empty list and “%(class_name)s.allow_empty” is False.') % { 'class_name': self.__class__.__name__, }) context = self.get_context_data() return self.render_to_response(context) class MultipleObjectTemplateResponseMixin(TemplateResponseMixin): """Mixin for responding with a template and list of objects.""" template_name_suffix = '_list' def get_template_names(self): """ Return a list of template names to be used for the request. Must return a list. May not be called if render_to_response is overridden. """ try: names = super().get_template_names() except ImproperlyConfigured: # If template_name isn't specified, it's not a problem -- # we just start with an empty list. names = [] # If the list is a queryset, we'll invent a template name based on the # app and model name. This name gets put at the end of the template # name list so that user-supplied names override the automatically- # generated ones. if hasattr(self.object_list, 'model'): opts = self.object_list.model._meta names.append("%s/%s%s.html" % (opts.app_label, opts.model_name, self.template_name_suffix)) elif not names: raise ImproperlyConfigured( "%(cls)s requires either a 'template_name' attribute " "or a get_queryset() method that returns a QuerySet." % { 'cls': self.__class__.__name__, } ) return names class ListView(MultipleObjectTemplateResponseMixin, BaseListView): """ Render some list of objects, set by `self.model` or `self.queryset`. `self.queryset` can actually be any iterable of items, not just a queryset. """
5fa5b23ba1e13a8f27483985d9dc197c9fda986cf6cff2c26cfccb5470619f11
import warnings from django.urls import include, re_path from django.utils.deprecation import RemovedInDjango40Warning from django.views import defaults __all__ = ['handler400', 'handler403', 'handler404', 'handler500', 'include', 'url'] handler400 = defaults.bad_request handler403 = defaults.permission_denied handler404 = defaults.page_not_found handler500 = defaults.server_error def url(regex, view, kwargs=None, name=None): warnings.warn( 'django.conf.urls.url() is deprecated in favor of ' 'django.urls.re_path().', RemovedInDjango40Warning, ) return re_path(regex, view, kwargs, name)
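
# Migration sketch (appended for illustration; the view is invented): url()
# simply forwards to re_path() after warning, so both build identical
# patterns, and path() can replace either where no regex is needed.
if __name__ == '__main__':
    from django.http import HttpResponse

    def year_archive(request, year):
        return HttpResponse(year)

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter('always')
        old_style = url(r'^articles/(?P<year>[0-9]{4})/$', year_archive)
    print(caught[0].category.__name__)  # RemovedInDjango40Warning

    new_style = re_path(r'^articles/(?P<year>[0-9]{4})/$', year_archive)
    print(old_style.pattern.regex.pattern == new_style.pattern.regex.pattern)  # True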
a98a152f05d89126cfd770c6b7cc4768dcebbbdbfeeeb865fef52ba5dcf7028b
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j F Y'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = 'j F Y H:i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'j F Y'
SHORT_DATETIME_FORMAT = 'j F Y H:i'
FIRST_DAY_OF_WEEK = 0  # Sunday

# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see https://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
    '%Y/%m/%d',  # '2006/10/25'
]
TIME_INPUT_FORMATS = [
    '%H:%M',  # '14:30'
    '%H:%M:%S',  # '14:30:59'
]
DATETIME_INPUT_FORMATS = [
    '%Y/%m/%d %H:%M',  # '2006/10/25 14:30'
    '%Y/%m/%d %H:%M:%S',  # '2006/10/25 14:30:59'
]
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
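
# Quick check (appended as a standalone sketch): the *_INPUT_FORMATS above
# are ordinary strptime patterns, so they can be exercised with the standard
# library directly -- roughly what Django's form fields do when parsing input.
if __name__ == '__main__':
    from datetime import datetime
    value = '2006/10/25 14:30'
    for fmt in DATETIME_INPUT_FORMATS:
        try:
            print(datetime.strptime(value, fmt))  # 2006-10-25 14:30:00
            break
        except ValueError:
            continue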
53f62d11c0673c83fd4448ecf435de6ea4bc57052de7048b8035ac1c569a2554
"""Translation helper functions.""" import functools import gettext as gettext_module import os import re import sys import warnings from asgiref.local import Local from django.apps import apps from django.conf import settings from django.conf.locale import LANG_INFO from django.core.exceptions import AppRegistryNotReady from django.core.signals import setting_changed from django.dispatch import receiver from django.utils.regex_helper import _lazy_re_compile from django.utils.safestring import SafeData, mark_safe from . import to_language, to_locale # Translations are cached in a dictionary for every language. # The active translations are stored by threadid to make them thread local. _translations = {} _active = Local() # The default translation is based on the settings file. _default = None # magic gettext number to separate context from message CONTEXT_SEPARATOR = "\x04" # Format of Accept-Language header values. From RFC 2616, section 14.4 and 3.9 # and RFC 3066, section 2.1 accept_language_re = _lazy_re_compile(r''' ([A-Za-z]{1,8}(?:-[A-Za-z0-9]{1,8})*|\*) # "en", "en-au", "x-y-z", "es-419", "*" (?:\s*;\s*q=(0(?:\.\d{,3})?|1(?:\.0{,3})?))? # Optional "q=1.00", "q=0.8" (?:\s*,\s*|$) # Multiple accepts per header. ''', re.VERBOSE) language_code_re = _lazy_re_compile( r'^[a-z]{1,8}(?:-[a-z0-9]{1,8})*(?:@[a-z0-9]{1,20})?$', re.IGNORECASE ) language_code_prefix_re = _lazy_re_compile(r'^/(\w+([@-]\w+)?)(/|$)') @receiver(setting_changed) def reset_cache(**kwargs): """ Reset global state when LANGUAGES setting has been changed, as some languages should no longer be accepted. """ if kwargs['setting'] in ('LANGUAGES', 'LANGUAGE_CODE'): check_for_language.cache_clear() get_languages.cache_clear() get_supported_language_variant.cache_clear() class TranslationCatalog: """ Simulate a dict for DjangoTranslation._catalog so as multiple catalogs with different plural equations are kept separate. """ def __init__(self, trans=None): self._catalogs = [trans._catalog.copy()] if trans else [{}] self._plurals = [trans.plural] if trans else [lambda n: int(n != 1)] def __getitem__(self, key): for cat in self._catalogs: try: return cat[key] except KeyError: pass raise KeyError(key) def __setitem__(self, key, value): self._catalogs[0][key] = value def __contains__(self, key): return any(key in cat for cat in self._catalogs) def items(self): for cat in self._catalogs: yield from cat.items() def keys(self): for cat in self._catalogs: yield from cat.keys() def update(self, trans): # Merge if plural function is the same, else prepend. for cat, plural in zip(self._catalogs, self._plurals): if trans.plural.__code__ == plural.__code__: cat.update(trans._catalog) break else: self._catalogs.insert(0, trans._catalog) self._plurals.insert(0, trans.plural) def get(self, key, default=None): missing = object() for cat in self._catalogs: result = cat.get(key, missing) if result is not missing: return result return default def plural(self, msgid, num): for cat, plural in zip(self._catalogs, self._plurals): tmsg = cat.get((msgid, plural(num))) if tmsg is not None: return tmsg raise KeyError class DjangoTranslation(gettext_module.GNUTranslations): """ Set up the GNUTranslations context with regard to output charset. This translation object will be constructed out of multiple GNUTranslations objects by merging their catalogs. It will construct an object for the requested language and add a fallback to the default language, if it's different from the requested language. 
""" domain = 'django' def __init__(self, language, domain=None, localedirs=None): """Create a GNUTranslations() using many locale directories""" gettext_module.GNUTranslations.__init__(self) if domain is not None: self.domain = domain self.__language = language self.__to_language = to_language(language) self.__locale = to_locale(language) self._catalog = None # If a language doesn't have a catalog, use the Germanic default for # pluralization: anything except one is pluralized. self.plural = lambda n: int(n != 1) if self.domain == 'django': if localedirs is not None: # A module-level cache is used for caching 'django' translations warnings.warn("localedirs is ignored when domain is 'django'.", RuntimeWarning) localedirs = None self._init_translation_catalog() if localedirs: for localedir in localedirs: translation = self._new_gnu_trans(localedir) self.merge(translation) else: self._add_installed_apps_translations() self._add_local_translations() if self.__language == settings.LANGUAGE_CODE and self.domain == 'django' and self._catalog is None: # default lang should have at least one translation file available. raise OSError('No translation files found for default language %s.' % settings.LANGUAGE_CODE) self._add_fallback(localedirs) if self._catalog is None: # No catalogs found for this language, set an empty catalog. self._catalog = TranslationCatalog() def __repr__(self): return "<DjangoTranslation lang:%s>" % self.__language def _new_gnu_trans(self, localedir, use_null_fallback=True): """ Return a mergeable gettext.GNUTranslations instance. A convenience wrapper. By default gettext uses 'fallback=False'. Using param `use_null_fallback` to avoid confusion with any other references to 'fallback'. """ return gettext_module.translation( domain=self.domain, localedir=localedir, languages=[self.__locale], fallback=use_null_fallback, ) def _init_translation_catalog(self): """Create a base catalog using global django translations.""" settingsfile = sys.modules[settings.__module__].__file__ localedir = os.path.join(os.path.dirname(settingsfile), 'locale') translation = self._new_gnu_trans(localedir) self.merge(translation) def _add_installed_apps_translations(self): """Merge translations from each installed app.""" try: app_configs = reversed(list(apps.get_app_configs())) except AppRegistryNotReady: raise AppRegistryNotReady( "The translation infrastructure cannot be initialized before the " "apps registry is ready. 
Check that you don't make non-lazy " "gettext calls at import time.") for app_config in app_configs: localedir = os.path.join(app_config.path, 'locale') if os.path.exists(localedir): translation = self._new_gnu_trans(localedir) self.merge(translation) def _add_local_translations(self): """Merge translations defined in LOCALE_PATHS.""" for localedir in reversed(settings.LOCALE_PATHS): translation = self._new_gnu_trans(localedir) self.merge(translation) def _add_fallback(self, localedirs=None): """Set the GNUTranslations() fallback with the default language.""" # Don't set a fallback for the default language or any English variant # (as it's empty, so it'll ALWAYS fall back to the default language) if self.__language == settings.LANGUAGE_CODE or self.__language.startswith('en'): return if self.domain == 'django': # Get from cache default_translation = translation(settings.LANGUAGE_CODE) else: default_translation = DjangoTranslation( settings.LANGUAGE_CODE, domain=self.domain, localedirs=localedirs ) self.add_fallback(default_translation) def merge(self, other): """Merge another translation into this catalog.""" if not getattr(other, '_catalog', None): return # NullTranslations() has no _catalog if self._catalog is None: # Take plural and _info from first catalog found (generally Django's). self.plural = other.plural self._info = other._info.copy() self._catalog = TranslationCatalog(other) else: self._catalog.update(other) if other._fallback: self.add_fallback(other._fallback) def language(self): """Return the translation language.""" return self.__language def to_language(self): """Return the translation language name.""" return self.__to_language def ngettext(self, msgid1, msgid2, n): try: tmsg = self._catalog.plural(msgid1, n) except KeyError: if self._fallback: return self._fallback.ngettext(msgid1, msgid2, n) if n == 1: tmsg = msgid1 else: tmsg = msgid2 return tmsg def translation(language): """ Return a translation object in the default 'django' domain. """ global _translations if language not in _translations: _translations[language] = DjangoTranslation(language) return _translations[language] def activate(language): """ Fetch the translation object for a given language and install it as the current translation object for the current thread. """ if not language: return _active.value = translation(language) def deactivate(): """ Uninstall the active translation object so that further _() calls resolve to the default translation object. """ if hasattr(_active, "value"): del _active.value def deactivate_all(): """ Make the active translation object a NullTranslations() instance. This is useful when we want delayed translations to appear as the original string for some reason. """ _active.value = gettext_module.NullTranslations() _active.value.to_language = lambda *args: None def get_language(): """Return the currently selected language.""" t = getattr(_active, "value", None) if t is not None: try: return t.to_language() except AttributeError: pass # If we don't have a real translation object, assume it's the default language. return settings.LANGUAGE_CODE def get_language_bidi(): """ Return selected language's BiDi layout. * False = left-to-right layout * True = right-to-left layout """ lang = get_language() if lang is None: return False else: base_lang = get_language().split('-')[0] return base_lang in settings.LANGUAGES_BIDI def catalog(): """ Return the current active catalog for further processing. 
    This can be used if you need to modify the catalog or want to access the
    whole message catalog instead of just translating one string.
    """
    global _default

    t = getattr(_active, "value", None)
    if t is not None:
        return t
    if _default is None:
        _default = translation(settings.LANGUAGE_CODE)
    return _default


def gettext(message):
    """
    Translate the 'message' string. It uses the current thread to find the
    translation object to use. If no current translation is activated, the
    message will be run through the default translation object.
    """
    global _default

    eol_message = message.replace('\r\n', '\n').replace('\r', '\n')

    if eol_message:
        _default = _default or translation(settings.LANGUAGE_CODE)
        translation_object = getattr(_active, "value", _default)

        result = translation_object.gettext(eol_message)
    else:
        # Return an empty value of the corresponding type if an empty message
        # is given, instead of metadata, which is the default gettext behavior.
        result = type(message)('')

    if isinstance(message, SafeData):
        return mark_safe(result)

    return result


def pgettext(context, message):
    msg_with_ctxt = "%s%s%s" % (context, CONTEXT_SEPARATOR, message)
    result = gettext(msg_with_ctxt)
    if CONTEXT_SEPARATOR in result:
        # Translation not found
        result = message
    elif isinstance(message, SafeData):
        result = mark_safe(result)
    return result


def gettext_noop(message):
    """
    Mark strings for translation but don't translate them now. This can be
    used to store strings in global variables that should stay in the base
    language (because they might be used externally) and will be translated
    later.
    """
    return message


def do_ntranslate(singular, plural, number, translation_function):
    global _default

    t = getattr(_active, "value", None)
    if t is not None:
        return getattr(t, translation_function)(singular, plural, number)
    if _default is None:
        _default = translation(settings.LANGUAGE_CODE)
    return getattr(_default, translation_function)(singular, plural, number)


def ngettext(singular, plural, number):
    """
    Return a string of the translation of either the singular or plural,
    based on the number.
    """
    return do_ntranslate(singular, plural, number, 'ngettext')


def npgettext(context, singular, plural, number):
    msgs_with_ctxt = ("%s%s%s" % (context, CONTEXT_SEPARATOR, singular),
                      "%s%s%s" % (context, CONTEXT_SEPARATOR, plural),
                      number)
    result = ngettext(*msgs_with_ctxt)
    if CONTEXT_SEPARATOR in result:
        # Translation not found
        result = ngettext(singular, plural, number)
    return result


def all_locale_paths():
    """
    Return a list of paths to user-provided language files.
    """
    globalpath = os.path.join(
        os.path.dirname(sys.modules[settings.__module__].__file__), 'locale')
    app_paths = []
    for app_config in apps.get_app_configs():
        locale_path = os.path.join(app_config.path, 'locale')
        if os.path.exists(locale_path):
            app_paths.append(locale_path)
    return [globalpath, *settings.LOCALE_PATHS, *app_paths]


@functools.lru_cache(maxsize=1000)
def check_for_language(lang_code):
    """
    Check whether there is a global language file for the given language
    code. This is used to decide whether a user-provided language is
    available.

    lru_cache should have a maxsize to prevent from memory exhaustion attacks,
    as the provided language codes are taken from the HTTP request. See also
    <https://www.djangoproject.com/weblog/2007/oct/26/security-fix/>.
""" # First, a quick check to make sure lang_code is well-formed (#21458) if lang_code is None or not language_code_re.search(lang_code): return False return any( gettext_module.find('django', path, [to_locale(lang_code)]) is not None for path in all_locale_paths() ) @functools.lru_cache() def get_languages(): """ Cache of settings.LANGUAGES in a dictionary for easy lookups by key. """ return dict(settings.LANGUAGES) @functools.lru_cache(maxsize=1000) def get_supported_language_variant(lang_code, strict=False): """ Return the language code that's listed in supported languages, possibly selecting a more generic variant. Raise LookupError if nothing is found. If `strict` is False (the default), look for a country-specific variant when neither the language code nor its generic variant is found. lru_cache should have a maxsize to prevent from memory exhaustion attacks, as the provided language codes are taken from the HTTP request. See also <https://www.djangoproject.com/weblog/2007/oct/26/security-fix/>. """ if lang_code: # If 'fr-ca' is not supported, try special fallback or language-only 'fr'. possible_lang_codes = [lang_code] try: possible_lang_codes.extend(LANG_INFO[lang_code]['fallback']) except KeyError: pass generic_lang_code = lang_code.split('-')[0] possible_lang_codes.append(generic_lang_code) supported_lang_codes = get_languages() for code in possible_lang_codes: if code in supported_lang_codes and check_for_language(code): return code if not strict: # if fr-fr is not supported, try fr-ca. for supported_code in supported_lang_codes: if supported_code.startswith(generic_lang_code + '-'): return supported_code raise LookupError(lang_code) def get_language_from_path(path, strict=False): """ Return the language code if there's a valid language code found in `path`. If `strict` is False (the default), look for a country-specific variant when neither the language code nor its generic variant is found. """ regex_match = language_code_prefix_re.match(path) if not regex_match: return None lang_code = regex_match.group(1) try: return get_supported_language_variant(lang_code, strict=strict) except LookupError: return None def get_language_from_request(request, check_path=False): """ Analyze the request to find what language the user wants the system to show. Only languages listed in settings.LANGUAGES are taken into account. If the user requests a sublanguage where we have a main language, we send out the main language. If check_path is True, the URL path prefix will be checked for a language code, otherwise this is skipped for backwards compatibility. 
""" if check_path: lang_code = get_language_from_path(request.path_info) if lang_code is not None: return lang_code lang_code = request.COOKIES.get(settings.LANGUAGE_COOKIE_NAME) if lang_code is not None and lang_code in get_languages() and check_for_language(lang_code): return lang_code try: return get_supported_language_variant(lang_code) except LookupError: pass accept = request.META.get('HTTP_ACCEPT_LANGUAGE', '') for accept_lang, unused in parse_accept_lang_header(accept): if accept_lang == '*': break if not language_code_re.search(accept_lang): continue try: return get_supported_language_variant(accept_lang) except LookupError: continue try: return get_supported_language_variant(settings.LANGUAGE_CODE) except LookupError: return settings.LANGUAGE_CODE @functools.lru_cache(maxsize=1000) def parse_accept_lang_header(lang_string): """ Parse the lang_string, which is the body of an HTTP Accept-Language header, and return a tuple of (lang, q-value), ordered by 'q' values. Return an empty tuple if there are any format errors in lang_string. """ result = [] pieces = accept_language_re.split(lang_string.lower()) if pieces[-1]: return () for i in range(0, len(pieces) - 1, 3): first, lang, priority = pieces[i:i + 3] if first: return () if priority: priority = float(priority) else: priority = 1.0 result.append((lang, priority)) result.sort(key=lambda k: k[1], reverse=True) return tuple(result)
8a9718d96cfbc874ecfe1930175d9fa031544ac3712ed9718863ab01db067111
from pathlib import Path from asgiref.local import Local from django.apps import apps def _is_django_module(module): """Return True if the given module is nested under Django.""" return module.__name__.startswith('django.') def watch_for_translation_changes(sender, **kwargs): """Register file watchers for .mo files in potential locale paths.""" from django.conf import settings if settings.USE_I18N: directories = [Path('locale')] directories.extend( Path(config.path) / 'locale' for config in apps.get_app_configs() if not _is_django_module(config.module) ) directories.extend(Path(p) for p in settings.LOCALE_PATHS) for path in directories: sender.watch_dir(path, '**/*.mo') def translation_file_changed(sender, file_path, **kwargs): """Clear the internal translations cache if a .mo file is modified.""" if file_path.suffix == '.mo': import gettext from django.utils.translation import trans_real gettext._translations = {} trans_real._translations = {} trans_real._default = None trans_real._active = Local() return True
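

# Illustrative sketch (not part of this module): the wiring these receivers
# expect. Django connects them to the autoreloader's signals elsewhere; the
# exact registration point may differ, so treat this as a sketch only.
def _example_connect_watchers():
    from django.utils.autoreload import autoreload_started, file_changed

    # watch_for_translation_changes registers the .mo globs when the
    # autoreloader starts; translation_file_changed resets the caches when a
    # watched file is modified.
    autoreload_started.connect(watch_for_translation_changes, dispatch_uid='translation_file_changed')
    file_changed.connect(translation_file_changed, dispatch_uid='translation_file_changed')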
a8e95b6b7c2e8b0a0eb09699e2be8c1f7b516316f96bbf839d68ef41919c4ada
from django.apps.registry import apps as global_apps from django.db import migrations, router from .exceptions import InvalidMigrationPlan from .loader import MigrationLoader from .recorder import MigrationRecorder from .state import ProjectState class MigrationExecutor: """ End-to-end migration execution - load migrations and run them up or down to a specified set of targets. """ def __init__(self, connection, progress_callback=None): self.connection = connection self.loader = MigrationLoader(self.connection) self.recorder = MigrationRecorder(self.connection) self.progress_callback = progress_callback def migration_plan(self, targets, clean_start=False): """ Given a set of targets, return a list of (Migration instance, backwards?). """ plan = [] if clean_start: applied = {} else: applied = dict(self.loader.applied_migrations) for target in targets: # If the target is (app_label, None), that means unmigrate everything if target[1] is None: for root in self.loader.graph.root_nodes(): if root[0] == target[0]: for migration in self.loader.graph.backwards_plan(root): if migration in applied: plan.append((self.loader.graph.nodes[migration], True)) applied.pop(migration) # If the migration is already applied, do backwards mode, # otherwise do forwards mode. elif target in applied: # Don't migrate backwards all the way to the target node (that # may roll back dependencies in other apps that don't need to # be rolled back); instead roll back through target's immediate # child(ren) in the same app, and no further. next_in_app = sorted( n for n in self.loader.graph.node_map[target].children if n[0] == target[0] ) for node in next_in_app: for migration in self.loader.graph.backwards_plan(node): if migration in applied: plan.append((self.loader.graph.nodes[migration], True)) applied.pop(migration) else: for migration in self.loader.graph.forwards_plan(target): if migration not in applied: plan.append((self.loader.graph.nodes[migration], False)) applied[migration] = self.loader.graph.nodes[migration] return plan def _create_project_state(self, with_applied_migrations=False): """ Create a project state including all the applications without migrations and applied migrations if with_applied_migrations=True. """ state = ProjectState(real_apps=list(self.loader.unmigrated_apps)) if with_applied_migrations: # Create the forwards plan Django would follow on an empty database full_plan = self.migration_plan(self.loader.graph.leaf_nodes(), clean_start=True) applied_migrations = { self.loader.graph.nodes[key] for key in self.loader.applied_migrations if key in self.loader.graph.nodes } for migration, _ in full_plan: if migration in applied_migrations: migration.mutate_state(state, preserve=False) return state def migrate(self, targets, plan=None, state=None, fake=False, fake_initial=False): """ Migrate the database up to the given targets. Django first needs to create all project states before a migration is (un)applied and in a second step run all the database operations. """ # The django_migrations table must be present to record applied # migrations. self.recorder.ensure_schema() if plan is None: plan = self.migration_plan(targets) # Create the forwards plan Django would follow on an empty database full_plan = self.migration_plan(self.loader.graph.leaf_nodes(), clean_start=True) all_forwards = all(not backwards for mig, backwards in plan) all_backwards = all(backwards for mig, backwards in plan) if not plan: if state is None: # The resulting state should include applied migrations. 
state = self._create_project_state(with_applied_migrations=True) elif all_forwards == all_backwards: # This should only happen if there's a mixed plan raise InvalidMigrationPlan( "Migration plans with both forwards and backwards migrations " "are not supported. Please split your migration process into " "separate plans of only forwards OR backwards migrations.", plan ) elif all_forwards: if state is None: # The resulting state should still include applied migrations. state = self._create_project_state(with_applied_migrations=True) state = self._migrate_all_forwards(state, plan, full_plan, fake=fake, fake_initial=fake_initial) else: # No need to check for `elif all_backwards` here, as that condition # would always evaluate to true. state = self._migrate_all_backwards(plan, full_plan, fake=fake) self.check_replacements() return state def _migrate_all_forwards(self, state, plan, full_plan, fake, fake_initial): """ Take a list of 2-tuples of the form (migration instance, False) and apply them in the order they occur in the full_plan. """ migrations_to_run = {m[0] for m in plan} for migration, _ in full_plan: if not migrations_to_run: # We remove every migration that we applied from these sets so # that we can bail out once the last migration has been applied # and don't always run until the very end of the migration # process. break if migration in migrations_to_run: if 'apps' not in state.__dict__: if self.progress_callback: self.progress_callback("render_start") state.apps # Render all -- performance critical if self.progress_callback: self.progress_callback("render_success") state = self.apply_migration(state, migration, fake=fake, fake_initial=fake_initial) migrations_to_run.remove(migration) return state def _migrate_all_backwards(self, plan, full_plan, fake): """ Take a list of 2-tuples of the form (migration instance, True) and unapply them in reverse order they occur in the full_plan. Since unapplying a migration requires the project state prior to that migration, Django will compute the migration states before each of them in a first run over the plan and then unapply them in a second run over the plan. """ migrations_to_run = {m[0] for m in plan} # Holds all migration states prior to the migrations being unapplied states = {} state = self._create_project_state() applied_migrations = { self.loader.graph.nodes[key] for key in self.loader.applied_migrations if key in self.loader.graph.nodes } if self.progress_callback: self.progress_callback("render_start") for migration, _ in full_plan: if not migrations_to_run: # We remove every migration that we applied from this set so # that we can bail out once the last migration has been applied # and don't always run until the very end of the migration # process. break if migration in migrations_to_run: if 'apps' not in state.__dict__: state.apps # Render all -- performance critical # The state before this migration states[migration] = state # The old state keeps as-is, we continue with the new state state = migration.mutate_state(state, preserve=True) migrations_to_run.remove(migration) elif migration in applied_migrations: # Only mutate the state if the migration is actually applied # to make sure the resulting state doesn't include changes # from unrelated migrations. 
migration.mutate_state(state, preserve=False) if self.progress_callback: self.progress_callback("render_success") for migration, _ in plan: self.unapply_migration(states[migration], migration, fake=fake) applied_migrations.remove(migration) # Generate the post migration state by starting from the state before # the last migration is unapplied and mutating it to include all the # remaining applied migrations. last_unapplied_migration = plan[-1][0] state = states[last_unapplied_migration] for index, (migration, _) in enumerate(full_plan): if migration == last_unapplied_migration: for migration, _ in full_plan[index:]: if migration in applied_migrations: migration.mutate_state(state, preserve=False) break return state def apply_migration(self, state, migration, fake=False, fake_initial=False): """Run a migration forwards.""" migration_recorded = False if self.progress_callback: self.progress_callback("apply_start", migration, fake) if not fake: if fake_initial: # Test to see if this is an already-applied initial migration applied, state = self.detect_soft_applied(state, migration) if applied: fake = True if not fake: # Alright, do it normally with self.connection.schema_editor(atomic=migration.atomic) as schema_editor: state = migration.apply(state, schema_editor) self.record_migration(migration) migration_recorded = True if not migration_recorded: self.record_migration(migration) # Report progress if self.progress_callback: self.progress_callback("apply_success", migration, fake) return state def record_migration(self, migration): # For replacement migrations, record individual statuses if migration.replaces: for app_label, name in migration.replaces: self.recorder.record_applied(app_label, name) else: self.recorder.record_applied(migration.app_label, migration.name) def unapply_migration(self, state, migration, fake=False): """Run a migration backwards.""" if self.progress_callback: self.progress_callback("unapply_start", migration, fake) if not fake: with self.connection.schema_editor(atomic=migration.atomic) as schema_editor: state = migration.unapply(state, schema_editor) # For replacement migrations, record individual statuses if migration.replaces: for app_label, name in migration.replaces: self.recorder.record_unapplied(app_label, name) else: self.recorder.record_unapplied(migration.app_label, migration.name) # Report progress if self.progress_callback: self.progress_callback("unapply_success", migration, fake) return state def check_replacements(self): """ Mark replacement migrations applied if their replaced set all are. Do this unconditionally on every migrate, rather than just when migrations are applied or unapplied, to correctly handle the case when a new squash migration is pushed to a deployment that already had all its replaced migrations applied. In this case no new migration will be applied, but the applied state of the squashed migration must be maintained. """ applied = self.recorder.applied_migrations() for key, migration in self.loader.replacements.items(): all_applied = all(m in applied for m in migration.replaces) if all_applied and key not in applied: self.recorder.record_applied(*key) def detect_soft_applied(self, project_state, migration): """ Test whether a migration has been implicitly applied - that the tables or columns it would create exist. This is intended only for use on initial migrations (as it only looks for CreateModel and AddField). 
""" def should_skip_detecting_model(migration, model): """ No need to detect tables for proxy models, unmanaged models, or models that can't be migrated on the current database. """ return ( model._meta.proxy or not model._meta.managed or not router.allow_migrate( self.connection.alias, migration.app_label, model_name=model._meta.model_name, ) ) if migration.initial is None: # Bail if the migration isn't the first one in its app if any(app == migration.app_label for app, name in migration.dependencies): return False, project_state elif migration.initial is False: # Bail if it's NOT an initial migration return False, project_state if project_state is None: after_state = self.loader.project_state((migration.app_label, migration.name), at_end=True) else: after_state = migration.mutate_state(project_state) apps = after_state.apps found_create_model_migration = False found_add_field_migration = False fold_identifier_case = self.connection.features.ignores_table_name_case with self.connection.cursor() as cursor: existing_table_names = set(self.connection.introspection.table_names(cursor)) if fold_identifier_case: existing_table_names = {name.casefold() for name in existing_table_names} # Make sure all create model and add field operations are done for operation in migration.operations: if isinstance(operation, migrations.CreateModel): model = apps.get_model(migration.app_label, operation.name) if model._meta.swapped: # We have to fetch the model to test with from the # main app cache, as it's not a direct dependency. model = global_apps.get_model(model._meta.swapped) if should_skip_detecting_model(migration, model): continue db_table = model._meta.db_table if fold_identifier_case: db_table = db_table.casefold() if db_table not in existing_table_names: return False, project_state found_create_model_migration = True elif isinstance(operation, migrations.AddField): model = apps.get_model(migration.app_label, operation.model_name) if model._meta.swapped: # We have to fetch the model to test with from the # main app cache, as it's not a direct dependency. model = global_apps.get_model(model._meta.swapped) if should_skip_detecting_model(migration, model): continue table = model._meta.db_table field = model._meta.get_field(operation.name) # Handle implicit many-to-many tables created by AddField. if field.many_to_many: through_db_table = field.remote_field.through._meta.db_table if fold_identifier_case: through_db_table = through_db_table.casefold() if through_db_table not in existing_table_names: return False, project_state else: found_add_field_migration = True continue with self.connection.cursor() as cursor: columns = self.connection.introspection.get_table_description(cursor, table) for column in columns: field_column = field.column column_name = column.name if fold_identifier_case: column_name = column_name.casefold() field_column = field_column.casefold() if column_name == field_column: found_add_field_migration = True break else: return False, project_state # If we get this far and we found at least one CreateModel or AddField migration, # the migration is considered implicitly applied. return (found_create_model_migration or found_add_field_migration), after_state
65398d0931b801ef9dd690ef0133762b785411e0a77e1eb78591395693d97f26
import copy from contextlib import contextmanager from django.apps import AppConfig from django.apps.registry import Apps, apps as global_apps from django.conf import settings from django.db import models from django.db.models.fields.related import RECURSIVE_RELATIONSHIP_CONSTANT from django.db.models.options import DEFAULT_NAMES, normalize_together from django.db.models.utils import make_model_tuple from django.utils.functional import cached_property from django.utils.module_loading import import_string from django.utils.version import get_docs_version from .exceptions import InvalidBasesError def _get_app_label_and_model_name(model, app_label=''): if isinstance(model, str): split = model.split('.', 1) return tuple(split) if len(split) == 2 else (app_label, split[0]) else: return model._meta.app_label, model._meta.model_name def _get_related_models(m): """Return all models that have a direct relationship to the given model.""" related_models = [ subclass for subclass in m.__subclasses__() if issubclass(subclass, models.Model) ] related_fields_models = set() for f in m._meta.get_fields(include_parents=True, include_hidden=True): if f.is_relation and f.related_model is not None and not isinstance(f.related_model, str): related_fields_models.add(f.model) related_models.append(f.related_model) # Reverse accessors of foreign keys to proxy models are attached to their # concrete proxied model. opts = m._meta if opts.proxy and m in related_fields_models: related_models.append(opts.concrete_model) return related_models def get_related_models_tuples(model): """ Return a list of typical (app_label, model_name) tuples for all related models for the given model. """ return { (rel_mod._meta.app_label, rel_mod._meta.model_name) for rel_mod in _get_related_models(model) } def get_related_models_recursive(model): """ Return all models that have a direct or indirect relationship to the given model. Relationships are either defined by explicit relational fields, like ForeignKey, ManyToManyField or OneToOneField, or by inheriting from another model (a superclass is related to its subclasses, but not vice versa). Note, however, that a model inheriting from a concrete model is also related to its superclass through the implicit *_ptr OneToOneField on the subclass. """ seen = set() queue = _get_related_models(model) for rel_mod in queue: rel_app_label, rel_model_name = rel_mod._meta.app_label, rel_mod._meta.model_name if (rel_app_label, rel_model_name) in seen: continue seen.add((rel_app_label, rel_model_name)) queue.extend(_get_related_models(rel_mod)) return seen - {(model._meta.app_label, model._meta.model_name)} class ProjectState: """ Represent the entire project's overall state. This is the item that is passed around - do it here rather than at the app level so that cross-app FKs/etc. resolve properly. 
""" def __init__(self, models=None, real_apps=None): self.models = models or {} # Apps to include from main registry, usually unmigrated ones self.real_apps = real_apps or [] self.is_delayed = False def add_model(self, model_state): app_label, model_name = model_state.app_label, model_state.name_lower self.models[(app_label, model_name)] = model_state if 'apps' in self.__dict__: # hasattr would cache the property self.reload_model(app_label, model_name) def remove_model(self, app_label, model_name): del self.models[app_label, model_name] if 'apps' in self.__dict__: # hasattr would cache the property self.apps.unregister_model(app_label, model_name) # Need to do this explicitly since unregister_model() doesn't clear # the cache automatically (#24513) self.apps.clear_cache() def _find_reload_model(self, app_label, model_name, delay=False): if delay: self.is_delayed = True related_models = set() try: old_model = self.apps.get_model(app_label, model_name) except LookupError: pass else: # Get all relations to and from the old model before reloading, # as _meta.apps may change if delay: related_models = get_related_models_tuples(old_model) else: related_models = get_related_models_recursive(old_model) # Get all outgoing references from the model to be rendered model_state = self.models[(app_label, model_name)] # Directly related models are the models pointed to by ForeignKeys, # OneToOneFields, and ManyToManyFields. direct_related_models = set() for field in model_state.fields.values(): if field.is_relation: if field.remote_field.model == RECURSIVE_RELATIONSHIP_CONSTANT: continue rel_app_label, rel_model_name = _get_app_label_and_model_name(field.related_model, app_label) direct_related_models.add((rel_app_label, rel_model_name.lower())) # For all direct related models recursively get all related models. related_models.update(direct_related_models) for rel_app_label, rel_model_name in direct_related_models: try: rel_model = self.apps.get_model(rel_app_label, rel_model_name) except LookupError: pass else: if delay: related_models.update(get_related_models_tuples(rel_model)) else: related_models.update(get_related_models_recursive(rel_model)) # Include the model itself related_models.add((app_label, model_name)) return related_models def reload_model(self, app_label, model_name, delay=False): if 'apps' in self.__dict__: # hasattr would cache the property related_models = self._find_reload_model(app_label, model_name, delay) self._reload(related_models) def reload_models(self, models, delay=True): if 'apps' in self.__dict__: # hasattr would cache the property related_models = set() for app_label, model_name in models: related_models.update(self._find_reload_model(app_label, model_name, delay)) self._reload(related_models) def _reload(self, related_models): # Unregister all related models with self.apps.bulk_update(): for rel_app_label, rel_model_name in related_models: self.apps.unregister_model(rel_app_label, rel_model_name) states_to_be_rendered = [] # Gather all models states of those models that will be rerendered. # This includes: # 1. All related models of unmigrated apps for model_state in self.apps.real_models: if (model_state.app_label, model_state.name_lower) in related_models: states_to_be_rendered.append(model_state) # 2. 
All related models of migrated apps for rel_app_label, rel_model_name in related_models: try: model_state = self.models[rel_app_label, rel_model_name] except KeyError: pass else: states_to_be_rendered.append(model_state) # Render all models self.apps.render_multiple(states_to_be_rendered) def clone(self): """Return an exact copy of this ProjectState.""" new_state = ProjectState( models={k: v.clone() for k, v in self.models.items()}, real_apps=self.real_apps, ) if 'apps' in self.__dict__: new_state.apps = self.apps.clone() new_state.is_delayed = self.is_delayed return new_state def clear_delayed_apps_cache(self): if self.is_delayed and 'apps' in self.__dict__: del self.__dict__['apps'] @cached_property def apps(self): return StateApps(self.real_apps, self.models) @property def concrete_apps(self): self.apps = StateApps(self.real_apps, self.models, ignore_swappable=True) return self.apps @classmethod def from_apps(cls, apps): """Take an Apps and return a ProjectState matching it.""" app_models = {} for model in apps.get_models(include_swapped=True): model_state = ModelState.from_model(model) app_models[(model_state.app_label, model_state.name_lower)] = model_state return cls(app_models) def __eq__(self, other): return self.models == other.models and set(self.real_apps) == set(other.real_apps) class AppConfigStub(AppConfig): """Stub of an AppConfig. Only provides a label and a dict of models.""" # Not used, but required by AppConfig.__init__ path = '' def __init__(self, label): self.label = label # App-label and app-name are not the same thing, so technically passing # in the label here is wrong. In practice, migrations don't care about # the app name, but we need something unique, and the label works fine. super().__init__(label, None) def import_models(self): self.models = self.apps.all_models[self.label] class StateApps(Apps): """ Subclass of the global Apps registry class to better handle dynamic model additions and removals. """ def __init__(self, real_apps, models, ignore_swappable=False): # Any apps in self.real_apps should have all their models included # in the render. We don't use the original model instances as there # are some variables that refer to the Apps object. # FKs/M2Ms from real apps are also not included as they just # mess things up with partial states (due to lack of dependencies) self.real_models = [] for app_label in real_apps: app = global_apps.get_app_config(app_label) for model in app.get_models(): self.real_models.append(ModelState.from_model(model, exclude_rels=True)) # Populate the app registry with a stub for each application. app_labels = {model_state.app_label for model_state in models.values()} app_configs = [AppConfigStub(label) for label in sorted([*real_apps, *app_labels])] super().__init__(app_configs) # These locks get in the way of copying as implemented in clone(), # which is called whenever Django duplicates a StateApps before # updating it. self._lock = None self.ready_event = None self.render_multiple([*models.values(), *self.real_models]) # There shouldn't be any operations pending at this point. from django.core.checks.model_checks import _check_lazy_references ignore = {make_model_tuple(settings.AUTH_USER_MODEL)} if ignore_swappable else set() errors = _check_lazy_references(self, ignore=ignore) if errors: raise ValueError("\n".join(error.msg for error in errors)) @contextmanager def bulk_update(self): # Avoid clearing each model's cache for each change. Instead, clear # all caches when we're finished updating the model instances. 
ready = self.ready self.ready = False try: yield finally: self.ready = ready self.clear_cache() def render_multiple(self, model_states): # We keep trying to render the models in a loop, ignoring invalid # base errors, until the size of the unrendered models doesn't # decrease by at least one, meaning there's a base dependency loop/ # missing base. if not model_states: return # Prevent that all model caches are expired for each render. with self.bulk_update(): unrendered_models = model_states while unrendered_models: new_unrendered_models = [] for model in unrendered_models: try: model.render(self) except InvalidBasesError: new_unrendered_models.append(model) if len(new_unrendered_models) == len(unrendered_models): raise InvalidBasesError( "Cannot resolve bases for %r\nThis can happen if you are inheriting models from an " "app with migrations (e.g. contrib.auth)\n in an app with no migrations; see " "https://docs.djangoproject.com/en/%s/topics/migrations/#dependencies " "for more" % (new_unrendered_models, get_docs_version()) ) unrendered_models = new_unrendered_models def clone(self): """Return a clone of this registry.""" clone = StateApps([], {}) clone.all_models = copy.deepcopy(self.all_models) clone.app_configs = copy.deepcopy(self.app_configs) # Set the pointer to the correct app registry. for app_config in clone.app_configs.values(): app_config.apps = clone # No need to actually clone them, they'll never change clone.real_models = self.real_models return clone def register_model(self, app_label, model): self.all_models[app_label][model._meta.model_name] = model if app_label not in self.app_configs: self.app_configs[app_label] = AppConfigStub(app_label) self.app_configs[app_label].apps = self self.app_configs[app_label].models = {} self.app_configs[app_label].models[model._meta.model_name] = model self.do_pending_operations(model) self.clear_cache() def unregister_model(self, app_label, model_name): try: del self.all_models[app_label][model_name] del self.app_configs[app_label].models[model_name] except KeyError: pass class ModelState: """ Represent a Django Model. Don't use the actual Model class as it's not designed to have its options changed - instead, mutate this one and then render it into a Model as required. Note that while you are allowed to mutate .fields, you are not allowed to mutate the Field instances inside there themselves - you must instead assign new ones, as these are not detached during a clone. """ def __init__(self, app_label, name, fields, options=None, bases=None, managers=None): self.app_label = app_label self.name = name self.fields = dict(fields) self.options = options or {} self.options.setdefault('indexes', []) self.options.setdefault('constraints', []) self.bases = bases or (models.Model,) self.managers = managers or [] for name, field in self.fields.items(): # Sanity-check that fields are NOT already bound to a model. if hasattr(field, 'model'): raise ValueError( 'ModelState.fields cannot be bound to a model - "%s" is.' % name ) # Sanity-check that relation fields are NOT referring to a model class. if field.is_relation and hasattr(field.related_model, '_meta'): raise ValueError( 'ModelState.fields cannot refer to a model class - "%s.to" does. ' 'Use a string reference instead.' % name ) if field.many_to_many and hasattr(field.remote_field.through, '_meta'): raise ValueError( 'ModelState.fields cannot refer to a model class - "%s.through" does. ' 'Use a string reference instead.' % name ) # Sanity-check that indexes have their name set. 
for index in self.options['indexes']: if not index.name: raise ValueError( "Indexes passed to ModelState require a name attribute. " "%r doesn't have one." % index ) @cached_property def name_lower(self): return self.name.lower() @classmethod def from_model(cls, model, exclude_rels=False): """Given a model, return a ModelState representing it.""" # Deconstruct the fields fields = [] for field in model._meta.local_fields: if getattr(field, "remote_field", None) and exclude_rels: continue if isinstance(field, models.OrderWrt): continue name = field.name try: fields.append((name, field.clone())) except TypeError as e: raise TypeError("Couldn't reconstruct field %s on %s: %s" % ( name, model._meta.label, e, )) if not exclude_rels: for field in model._meta.local_many_to_many: name = field.name try: fields.append((name, field.clone())) except TypeError as e: raise TypeError("Couldn't reconstruct m2m field %s on %s: %s" % ( name, model._meta.object_name, e, )) # Extract the options options = {} for name in DEFAULT_NAMES: # Ignore some special options if name in ["apps", "app_label"]: continue elif name in model._meta.original_attrs: if name == "unique_together": ut = model._meta.original_attrs["unique_together"] options[name] = set(normalize_together(ut)) elif name == "index_together": it = model._meta.original_attrs["index_together"] options[name] = set(normalize_together(it)) elif name == "indexes": indexes = [idx.clone() for idx in model._meta.indexes] for index in indexes: if not index.name: index.set_name_with_model(model) options['indexes'] = indexes elif name == 'constraints': options['constraints'] = [con.clone() for con in model._meta.constraints] else: options[name] = model._meta.original_attrs[name] # If we're ignoring relationships, remove all field-listing model # options (that option basically just means "make a stub model") if exclude_rels: for key in ["unique_together", "index_together", "order_with_respect_to"]: if key in options: del options[key] # Private fields are ignored, so remove options that refer to them. elif options.get('order_with_respect_to') in {field.name for field in model._meta.private_fields}: del options['order_with_respect_to'] def flatten_bases(model): bases = [] for base in model.__bases__: if hasattr(base, "_meta") and base._meta.abstract: bases.extend(flatten_bases(base)) else: bases.append(base) return bases # We can't rely on __mro__ directly because we only want to flatten # abstract models and not the whole tree. However by recursing on # __bases__ we may end up with duplicates and ordering issues, we # therefore discard any duplicates and reorder the bases according # to their index in the MRO. flattened_bases = sorted(set(flatten_bases(model)), key=lambda x: model.__mro__.index(x)) # Make our record bases = tuple( ( base._meta.label_lower if hasattr(base, "_meta") else base ) for base in flattened_bases ) # Ensure at least one base inherits from models.Model if not any((isinstance(base, str) or issubclass(base, models.Model)) for base in bases): bases = (models.Model,) managers = [] manager_names = set() default_manager_shim = None for manager in model._meta.managers: if manager.name in manager_names: # Skip overridden managers. continue elif manager.use_in_migrations: # Copy managers usable in migrations. new_manager = copy.copy(manager) new_manager._set_creation_counter() elif manager is model._base_manager or manager is model._default_manager: # Shim custom managers used as default and base managers. 
new_manager = models.Manager() new_manager.model = manager.model new_manager.name = manager.name if manager is model._default_manager: default_manager_shim = new_manager else: continue manager_names.add(manager.name) managers.append((manager.name, new_manager)) # Ignore a shimmed default manager called objects if it's the only one. if managers == [('objects', default_manager_shim)]: managers = [] # Construct the new ModelState return cls( model._meta.app_label, model._meta.object_name, fields, options, bases, managers, ) def construct_managers(self): """Deep-clone the managers using deconstruction.""" # Sort all managers by their creation counter sorted_managers = sorted(self.managers, key=lambda v: v[1].creation_counter) for mgr_name, manager in sorted_managers: as_manager, manager_path, qs_path, args, kwargs = manager.deconstruct() if as_manager: qs_class = import_string(qs_path) yield mgr_name, qs_class.as_manager() else: manager_class = import_string(manager_path) yield mgr_name, manager_class(*args, **kwargs) def clone(self): """Return an exact copy of this ModelState.""" return self.__class__( app_label=self.app_label, name=self.name, fields=dict(self.fields), # Since options are shallow-copied here, operations such as # AddIndex must replace their option (e.g 'indexes') rather # than mutating it. options=dict(self.options), bases=self.bases, managers=list(self.managers), ) def render(self, apps): """Create a Model object from our current state into the given apps.""" # First, make a Meta object meta_contents = {'app_label': self.app_label, 'apps': apps, **self.options} meta = type("Meta", (), meta_contents) # Then, work out our bases try: bases = tuple( (apps.get_model(base) if isinstance(base, str) else base) for base in self.bases ) except LookupError: raise InvalidBasesError("Cannot resolve one or more bases from %r" % (self.bases,)) # Clone fields for the body, add other bits. body = {name: field.clone() for name, field in self.fields.items()} body['Meta'] = meta body['__module__'] = "__fake__" # Restore managers body.update(self.construct_managers()) # Then, make a Model object (apps.register_model is called in __new__) return type(self.name, bases, body) def get_index_by_name(self, name): for index in self.options['indexes']: if index.name == name: return index raise ValueError("No index named %s on model %s" % (name, self.name)) def get_constraint_by_name(self, name): for constraint in self.options['constraints']: if constraint.name == name: return constraint raise ValueError('No constraint named %s on model %s' % (name, self.name)) def __repr__(self): return "<%s: '%s.%s'>" % (self.__class__.__name__, self.app_label, self.name) def __eq__(self, other): return ( (self.app_label == other.app_label) and (self.name == other.name) and (len(self.fields) == len(other.fields)) and all( k1 == k2 and f1.deconstruct()[1:] == f2.deconstruct()[1:] for (k1, f1), (k2, f2) in zip( sorted(self.fields.items()), sorted(other.fields.items()), ) ) and (self.options == other.options) and (self.bases == other.bases) and (self.managers == other.managers) )
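

# Illustrative sketch (not part of Django): building a ProjectState by hand
# and rendering it into in-memory model classes, with no database access.
# 'exampleapp' and the fields used below are hypothetical.
def _example_render_state():
    state = ProjectState()
    state.add_model(ModelState(
        'exampleapp', 'Author',
        [
            ('id', models.AutoField(primary_key=True)),
            ('name', models.CharField(max_length=100)),
        ],
    ))
    # state.apps is a StateApps registry holding the rendered fake models.
    return state.apps.get_model('exampleapp', 'Author')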
1584f3d07256fbe0bf39639426ebca9f96dbf1fbd363dbe65b2b8a73777a16fc
import pkgutil
import sys
from importlib import import_module, reload

from django.apps import apps
from django.conf import settings
from django.db.migrations.graph import MigrationGraph
from django.db.migrations.recorder import MigrationRecorder

from .exceptions import (
    AmbiguityError, BadMigrationError, InconsistentMigrationHistory,
    NodeNotFoundError,
)

MIGRATIONS_MODULE_NAME = 'migrations'


class MigrationLoader:
    """
    Load migration files from disk and their status from the database.

    Migration files are expected to live in the "migrations" directory of
    an app. Their names are entirely unimportant from a code perspective,
    but will probably follow the 1234_name.py convention.

    On initialization, this class will scan those directories, and open and
    read the Python files, looking for a class called Migration, which should
    inherit from django.db.migrations.Migration. See
    django.db.migrations.migration for what that looks like.

    Some migrations will be marked as "replacing" another set of migrations.
    These are loaded into a separate set of migrations away from the main ones.
    If all the migrations they replace are either unapplied or missing from
    disk, then they are injected into the main set, replacing the named
    migrations. Any dependency pointers to the replaced migrations are
    re-pointed to the new migration.

    This does mean that this class MUST also talk to the database as well as
    to disk, but this is probably fine. We're already not just operating
    in memory.
    """

    def __init__(
        self, connection, load=True, ignore_no_migrations=False,
        replace_migrations=True,
    ):
        self.connection = connection
        self.disk_migrations = None
        self.applied_migrations = None
        self.ignore_no_migrations = ignore_no_migrations
        self.replace_migrations = replace_migrations
        if load:
            self.build_graph()

    @classmethod
    def migrations_module(cls, app_label):
        """
        Return the path to the migrations module for the specified app_label
        and a boolean indicating if the module is specified in
        settings.MIGRATION_MODULES.
        """
        if app_label in settings.MIGRATION_MODULES:
            return settings.MIGRATION_MODULES[app_label], True
        else:
            app_package_name = apps.get_app_config(app_label).name
            return '%s.%s' % (app_package_name, MIGRATIONS_MODULE_NAME), False

    def load_disk(self):
        """Load the migrations from all INSTALLED_APPS from disk."""
        self.disk_migrations = {}
        self.unmigrated_apps = set()
        self.migrated_apps = set()
        for app_config in apps.get_app_configs():
            # Get the migrations module directory
            module_name, explicit = self.migrations_module(app_config.label)
            if module_name is None:
                self.unmigrated_apps.add(app_config.label)
                continue
            was_loaded = module_name in sys.modules
            try:
                module = import_module(module_name)
            except ModuleNotFoundError as e:
                if (
                    (explicit and self.ignore_no_migrations) or
                    (not explicit and MIGRATIONS_MODULE_NAME in e.name.split('.'))
                ):
                    self.unmigrated_apps.add(app_config.label)
                    continue
                raise
            else:
                # Module is not a package (e.g. migrations.py).
                if not hasattr(module, '__path__'):
                    self.unmigrated_apps.add(app_config.label)
                    continue
                # Force a reload if it's already loaded (tests need this)
                if was_loaded:
                    reload(module)
            migration_names = {
                name for _, name, is_pkg in pkgutil.iter_modules(module.__path__)
                if not is_pkg and name[0] not in '_~'
            }
            if migration_names or self.ignore_no_migrations:
                self.migrated_apps.add(app_config.label)
            else:
                self.unmigrated_apps.add(app_config.label)
            # Load migrations
            for migration_name in migration_names:
                migration_path = '%s.%s' % (module_name, migration_name)
                try:
                    migration_module = import_module(migration_path)
                except ImportError as e:
                    if 'bad magic number' in str(e):
                        raise ImportError(
                            "Couldn't import %r as it appears to be a stale "
                            ".pyc file." % migration_path
                        ) from e
                    else:
                        raise
                if not hasattr(migration_module, "Migration"):
                    raise BadMigrationError(
                        "Migration %s in app %s has no Migration class" % (migration_name, app_config.label)
                    )
                self.disk_migrations[app_config.label, migration_name] = migration_module.Migration(
                    migration_name,
                    app_config.label,
                )

    def get_migration(self, app_label, name_prefix):
        """Return the named migration or raise NodeNotFoundError."""
        return self.graph.nodes[app_label, name_prefix]

    def get_migration_by_prefix(self, app_label, name_prefix):
        """
        Return the migration(s) which match the given app label and name_prefix.
        """
        # Do the search
        results = []
        for migration_app_label, migration_name in self.disk_migrations:
            if migration_app_label == app_label and migration_name.startswith(name_prefix):
                results.append((migration_app_label, migration_name))
        if len(results) > 1:
            raise AmbiguityError(
                "There is more than one migration for '%s' with the prefix '%s'" % (app_label, name_prefix)
            )
        elif not results:
            raise KeyError("There are no migrations for '%s' with the prefix '%s'" % (app_label, name_prefix))
        else:
            return self.disk_migrations[results[0]]

    def check_key(self, key, current_app):
        if (key[1] != "__first__" and key[1] != "__latest__") or key in self.graph:
            return key
        # Special-case __first__, which means "the first migration" for
        # migrated apps, and is ignored for unmigrated apps. It allows
        # makemigrations to declare dependencies on apps before they even have
        # migrations.
        if key[0] == current_app:
            # Ignore __first__ references to the same app (#22325)
            return
        if key[0] in self.unmigrated_apps:
            # This app isn't migrated, but something depends on it.
            # The models will get auto-added into the state, though
            # so we're fine.
            return
        if key[0] in self.migrated_apps:
            try:
                if key[1] == "__first__":
                    return self.graph.root_nodes(key[0])[0]
                else:  # "__latest__"
                    return self.graph.leaf_nodes(key[0])[0]
            except IndexError:
                if self.ignore_no_migrations:
                    return None
                else:
                    raise ValueError("Dependency on app with no migrations: %s" % key[0])
        raise ValueError("Dependency on unknown app: %s" % key[0])

    def add_internal_dependencies(self, key, migration):
        """
        Internal dependencies need to be added first to ensure `__first__`
        dependencies find the correct root node.
        """
        for parent in migration.dependencies:
            # Ignore __first__ references to the same app.
if parent[0] == key[0] and parent[1] != '__first__': self.graph.add_dependency(migration, key, parent, skip_validation=True) def add_external_dependencies(self, key, migration): for parent in migration.dependencies: # Skip internal dependencies if key[0] == parent[0]: continue parent = self.check_key(parent, key[0]) if parent is not None: self.graph.add_dependency(migration, key, parent, skip_validation=True) for child in migration.run_before: child = self.check_key(child, key[0]) if child is not None: self.graph.add_dependency(migration, child, key, skip_validation=True) def build_graph(self): """ Build a migration dependency graph using both the disk and database. You'll need to rebuild the graph if you apply migrations. This isn't usually a problem as generally migration stuff runs in a one-shot process. """ # Load disk data self.load_disk() # Load database data if self.connection is None: self.applied_migrations = {} else: recorder = MigrationRecorder(self.connection) self.applied_migrations = recorder.applied_migrations() # To start, populate the migration graph with nodes for ALL migrations # and their dependencies. Also make note of replacing migrations at this step. self.graph = MigrationGraph() self.replacements = {} for key, migration in self.disk_migrations.items(): self.graph.add_node(key, migration) # Replacing migrations. if migration.replaces: self.replacements[key] = migration for key, migration in self.disk_migrations.items(): # Internal (same app) dependencies. self.add_internal_dependencies(key, migration) # Add external dependencies now that the internal ones have been resolved. for key, migration in self.disk_migrations.items(): self.add_external_dependencies(key, migration) # Carry out replacements where possible and if enabled. if self.replace_migrations: for key, migration in self.replacements.items(): # Get applied status of each of this migration's replacement # targets. applied_statuses = [(target in self.applied_migrations) for target in migration.replaces] # The replacing migration is only marked as applied if all of # its replacement targets are. if all(applied_statuses): self.applied_migrations[key] = migration else: self.applied_migrations.pop(key, None) # A replacing migration can be used if either all or none of # its replacement targets have been applied. if all(applied_statuses) or (not any(applied_statuses)): self.graph.remove_replaced_nodes(key, migration.replaces) else: # This replacing migration cannot be used because it is # partially applied. Remove it from the graph and remap # dependencies to it (#25945). self.graph.remove_replacement_node(key, migration.replaces) # Ensure the graph is consistent. try: self.graph.validate_consistency() except NodeNotFoundError as exc: # Check if the missing node could have been replaced by any squash # migration but wasn't because the squash migration was partially # applied before. In that case raise a more understandable exception # (#23556). # Get reverse replacements. reverse_replacements = {} for key, migration in self.replacements.items(): for replaced in migration.replaces: reverse_replacements.setdefault(replaced, set()).add(key) # Try to reraise exception with more detail. if exc.node in reverse_replacements: candidates = reverse_replacements.get(exc.node, set()) is_replaced = any(candidate in self.graph.nodes for candidate in candidates) if not is_replaced: tries = ', '.join('%s.%s' % c for c in candidates) raise NodeNotFoundError( "Migration {0} depends on nonexistent node ('{1}', '{2}'). 
" "Django tried to replace migration {1}.{2} with any of [{3}] " "but wasn't able to because some of the replaced migrations " "are already applied.".format( exc.origin, exc.node[0], exc.node[1], tries ), exc.node ) from exc raise self.graph.ensure_not_cyclic() def check_consistent_history(self, connection): """ Raise InconsistentMigrationHistory if any applied migrations have unapplied dependencies. """ recorder = MigrationRecorder(connection) applied = recorder.applied_migrations() for migration in applied: # If the migration is unknown, skip it. if migration not in self.graph.nodes: continue for parent in self.graph.node_map[migration].parents: if parent not in applied: # Skip unapplied squashed migrations that have all of their # `replaces` applied. if parent in self.replacements: if all(m in applied for m in self.replacements[parent].replaces): continue raise InconsistentMigrationHistory( "Migration {}.{} is applied before its dependency " "{}.{} on database '{}'.".format( migration[0], migration[1], parent[0], parent[1], connection.alias, ) ) def detect_conflicts(self): """ Look through the loaded graph and detect any conflicts - apps with more than one leaf migration. Return a dict of the app labels that conflict with the migration names that conflict. """ seen_apps = {} conflicting_apps = set() for app_label, migration_name in self.graph.leaf_nodes(): if app_label in seen_apps: conflicting_apps.add(app_label) seen_apps.setdefault(app_label, set()).add(migration_name) return {app_label: seen_apps[app_label] for app_label in conflicting_apps} def project_state(self, nodes=None, at_end=True): """ Return a ProjectState object representing the most recent state that the loaded migrations represent. See graph.make_state() for the meaning of "nodes" and "at_end". """ return self.graph.make_state(nodes=nodes, at_end=at_end, real_apps=list(self.unmigrated_apps)) def collect_sql(self, plan): """ Take a migration plan and return a list of collected SQL statements that represent the best-efforts version of that plan. """ statements = [] state = None for migration, backwards in plan: with self.connection.schema_editor(collect_sql=True, atomic=migration.atomic) as schema_editor: if state is None: state = self.project_state((migration.app_label, migration.name), at_end=False) if not backwards: state = migration.apply(state, schema_editor, collect_sql=True) else: state = migration.unapply(state, schema_editor, collect_sql=True) statements.extend(schema_editor.collected_sql) return statements
5cb4d9fee7e95495ff9cbe1e803106e43aaf07c7aa48652e56830d6759695de9
from django.db import DatabaseError class AmbiguityError(Exception): """More than one migration matches a name prefix.""" pass class BadMigrationError(Exception): """There's a bad migration (unreadable/bad format/etc.).""" pass class CircularDependencyError(Exception): """There's an impossible-to-resolve circular dependency.""" pass class InconsistentMigrationHistory(Exception): """An applied migration has some of its dependencies not applied.""" pass class InvalidBasesError(ValueError): """A model's base classes can't be resolved.""" pass class IrreversibleError(RuntimeError): """An irreversible migration is about to be reversed.""" pass class NodeNotFoundError(LookupError): """An attempt on a node is made that is not available in the graph.""" def __init__(self, message, node, origin=None): self.message = message self.origin = origin self.node = node def __str__(self): return self.message def __repr__(self): return "NodeNotFoundError(%r)" % (self.node,) class MigrationSchemaMissing(DatabaseError): pass class InvalidMigrationPlan(ValueError): pass
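

# Illustrative sketch (not part of Django): NodeNotFoundError carries the
# missing (app_label, migration_name) node so callers can report or inspect
# it programmatically. The names used below are hypothetical.
def _example_node_not_found():
    try:
        raise NodeNotFoundError("Migration exampleapp.0002 not found", ('exampleapp', '0002'))
    except NodeNotFoundError as exc:
        return exc.node  # ('exampleapp', '0002')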
0efbe4b65de35aa99031546b4e9edd3430449ba6a7e4be67405af6e514a932e1
import datetime
import importlib
import os
import sys

from django.apps import apps
from django.db.models import NOT_PROVIDED
from django.utils import timezone

from .loader import MigrationLoader


class MigrationQuestioner:
    """
    Give the autodetector responses to questions it might have.

    This base class has a built-in noninteractive mode, but the
    interactive subclass is what the command-line arguments will use.
    """

    def __init__(self, defaults=None, specified_apps=None, dry_run=None):
        self.defaults = defaults or {}
        self.specified_apps = specified_apps or set()
        self.dry_run = dry_run

    def ask_initial(self, app_label):
        """Should we create an initial migration for the app?"""
        # If it was specified on the command line, definitely true
        if app_label in self.specified_apps:
            return True
        # Otherwise, we look to see if it has a migrations module
        # without any Python files in it, apart from __init__.py.
        # Apps from the new app template will have these; the Python
        # file check will ensure we skip South ones.
        try:
            app_config = apps.get_app_config(app_label)
        except LookupError:         # It's a fake app.
            return self.defaults.get("ask_initial", False)
        migrations_import_path, _ = MigrationLoader.migrations_module(app_config.label)
        if migrations_import_path is None:
            # It's an application with migrations disabled.
            return self.defaults.get("ask_initial", False)
        try:
            migrations_module = importlib.import_module(migrations_import_path)
        except ImportError:
            return self.defaults.get("ask_initial", False)
        else:
            # getattr() needed on PY36 and older (replace with attribute access).
            if getattr(migrations_module, "__file__", None):
                filenames = os.listdir(os.path.dirname(migrations_module.__file__))
            elif hasattr(migrations_module, "__path__"):
                if len(migrations_module.__path__) > 1:
                    return False
                filenames = os.listdir(list(migrations_module.__path__)[0])
            return not any(x.endswith(".py") for x in filenames if x != "__init__.py")

    def ask_not_null_addition(self, field_name, model_name):
        """Adding a NOT NULL field to a model."""
        # None means quit
        return None

    def ask_not_null_alteration(self, field_name, model_name):
        """Changing a NULL field to NOT NULL."""
        # None means quit
        return None

    def ask_rename(self, model_name, old_name, new_name, field_instance):
        """Was this field really renamed?"""
        return self.defaults.get("ask_rename", False)

    def ask_rename_model(self, old_model_state, new_model_state):
        """Was this model really renamed?"""
        return self.defaults.get("ask_rename_model", False)

    def ask_merge(self, app_label):
        """Do you really want to merge these migrations?"""
        return self.defaults.get("ask_merge", False)

    def ask_auto_now_add_addition(self, field_name, model_name):
        """Adding an auto_now_add field to a model."""
        # None means quit
        return None


class InteractiveMigrationQuestioner(MigrationQuestioner):

    def _boolean_input(self, question, default=None):
        result = input("%s " % question)
        if not result and default is not None:
            return default
        while not result or result[0].lower() not in "yn":
            result = input("Please answer yes or no: ")
        return result[0].lower() == "y"

    def _choice_input(self, question, choices):
        print(question)
        for i, choice in enumerate(choices):
            print(" %s) %s" % (i + 1, choice))
        result = input("Select an option: ")
        while True:
            try:
                value = int(result)
            except ValueError:
                pass
            else:
                if 0 < value <= len(choices):
                    return value
            result = input("Please select a valid option: ")

    def _ask_default(self, default=''):
        """
        Prompt for a default value.

        The ``default`` argument allows providing a custom default value (as a
        string) which will be shown to the user and used as the return value
        if the user doesn't provide any other input.
        """
        print("Please enter the default value now, as valid Python")
        if default:
            print(
                "You can accept the default '{}' by pressing 'Enter' or you "
                "can provide another value.".format(default)
            )
        print("The datetime and django.utils.timezone modules are available, so you can do e.g. timezone.now")
        print("Type 'exit' to exit this prompt")
        while True:
            if default:
                prompt = "[default: {}] >>> ".format(default)
            else:
                prompt = ">>> "
            code = input(prompt)
            if not code and default:
                code = default
            if not code:
                print("Please enter some code, or 'exit' (with no quotes) to exit.")
            elif code == "exit":
                sys.exit(1)
            else:
                try:
                    return eval(code, {}, {'datetime': datetime, 'timezone': timezone})
                except (SyntaxError, NameError) as e:
                    print("Invalid input: %s" % e)

    def ask_not_null_addition(self, field_name, model_name):
        """Adding a NOT NULL field to a model."""
        if not self.dry_run:
            choice = self._choice_input(
                "You are trying to add a non-nullable field '%s' to %s without a default; "
                "we can't do that (the database needs something to populate existing rows).\n"
                "Please select a fix:" % (field_name, model_name),
                [
                    ("Provide a one-off default now (will be set on all existing "
                     "rows with a null value for this column)"),
                    "Quit, and let me add a default in models.py",
                ]
            )
            if choice == 2:
                sys.exit(3)
            else:
                return self._ask_default()
        return None

    def ask_not_null_alteration(self, field_name, model_name):
        """Changing a NULL field to NOT NULL."""
        if not self.dry_run:
            choice = self._choice_input(
                "You are trying to change the nullable field '%s' on %s to non-nullable "
                "without a default; we can't do that (the database needs something to "
                "populate existing rows).\n"
                "Please select a fix:" % (field_name, model_name),
                [
                    ("Provide a one-off default now (will be set on all existing "
                     "rows with a null value for this column)"),
                    ("Ignore for now, and let me handle existing rows with NULL myself "
                     "(e.g. because you added a RunPython or RunSQL operation to handle "
                     "NULL values in a previous data migration)"),
                    "Quit, and let me add a default in models.py",
                ]
            )
            if choice == 2:
                return NOT_PROVIDED
            elif choice == 3:
                sys.exit(3)
            else:
                return self._ask_default()
        return None

    def ask_rename(self, model_name, old_name, new_name, field_instance):
        """Was this field really renamed?"""
        msg = "Did you rename %s.%s to %s.%s (a %s)? [y/N]"
        return self._boolean_input(msg % (model_name, old_name, model_name, new_name,
                                          field_instance.__class__.__name__), False)

    def ask_rename_model(self, old_model_state, new_model_state):
        """Was this model really renamed?"""
        msg = "Did you rename the %s.%s model to %s? [y/N]"
        return self._boolean_input(msg % (old_model_state.app_label, old_model_state.name,
                                          new_model_state.name), False)

    def ask_merge(self, app_label):
        return self._boolean_input(
            "\nMerging will only work if the operations printed above do not conflict\n" +
            "with each other (working on different fields or models)\n" +
            "Do you want to merge these migration branches? [y/N]",
            False,
        )

    def ask_auto_now_add_addition(self, field_name, model_name):
        """Adding an auto_now_add field to a model."""
        if not self.dry_run:
            choice = self._choice_input(
                "You are trying to add the field '{}' with 'auto_now_add=True' "
                "to {} without a default; the database needs something to "
                "populate existing rows.\n".format(field_name, model_name),
                [
                    "Provide a one-off default now (will be set on all "
                    "existing rows)",
                    "Quit, and let me add a default in models.py",
                ]
            )
            if choice == 2:
                sys.exit(3)
            else:
                return self._ask_default(default='timezone.now')
        return None


class NonInteractiveMigrationQuestioner(MigrationQuestioner):

    def ask_not_null_addition(self, field_name, model_name):
        # We can't ask the user, so act like the user aborted.
        sys.exit(3)

    def ask_not_null_alteration(self, field_name, model_name):
        # We can't ask the user, so set as not provided.
        return NOT_PROVIDED

    def ask_auto_now_add_addition(self, field_name, model_name):
        # We can't ask the user, so act like the user aborted.
        sys.exit(3)
40051c858a4d6a46a6f9a2cec5e68ecddef6533a27ba4daee2c05443c68060f5
import functools
import re
from itertools import chain

from django.conf import settings
from django.db import models
from django.db.migrations import operations
from django.db.migrations.migration import Migration
from django.db.migrations.operations.models import AlterModelOptions
from django.db.migrations.optimizer import MigrationOptimizer
from django.db.migrations.questioner import MigrationQuestioner
from django.db.migrations.utils import (
    COMPILED_REGEX_TYPE, RegexObject, get_migration_name_timestamp,
)
from django.utils.topological_sort import stable_topological_sort


class MigrationAutodetector:
    """
    Take a pair of ProjectStates and compare them to see what the first would
    need doing to make it match the second (the second usually being the
    project's current state).

    Note that this naturally operates on entire projects at a time,
    as it's likely that changes interact (for example, you can't
    add a ForeignKey without having a migration to add the table it
    depends on first). A user interface may offer single-app usage
    if it wishes, with the caveat that it may not always be possible.
    """

    def __init__(self, from_state, to_state, questioner=None):
        self.from_state = from_state
        self.to_state = to_state
        self.questioner = questioner or MigrationQuestioner()
        self.existing_apps = {app for app, model in from_state.models}

    def changes(self, graph, trim_to_apps=None, convert_apps=None, migration_name=None):
        """
        Main entry point to produce a list of applicable changes.
        Take a graph to base names on and an optional set of apps
        to try and restrict to (restriction is not guaranteed).
        """
        changes = self._detect_changes(convert_apps, graph)
        changes = self.arrange_for_graph(changes, graph, migration_name)
        if trim_to_apps:
            changes = self._trim_to_apps(changes, trim_to_apps)
        return changes

    def deep_deconstruct(self, obj):
        """
        Recursive deconstruction for a field and its arguments.
        Used for full comparison for rename/alter; sometimes a single-level
        deconstruction will not compare correctly.
        """
        if isinstance(obj, list):
            return [self.deep_deconstruct(value) for value in obj]
        elif isinstance(obj, tuple):
            return tuple(self.deep_deconstruct(value) for value in obj)
        elif isinstance(obj, dict):
            return {
                key: self.deep_deconstruct(value)
                for key, value in obj.items()
            }
        elif isinstance(obj, functools.partial):
            return (obj.func, self.deep_deconstruct(obj.args), self.deep_deconstruct(obj.keywords))
        elif isinstance(obj, COMPILED_REGEX_TYPE):
            return RegexObject(obj)
        elif isinstance(obj, type):
            # If this is a type that implements 'deconstruct' as an instance method,
            # avoid treating this as being deconstructible itself - see #22951
            return obj
        elif hasattr(obj, 'deconstruct'):
            deconstructed = obj.deconstruct()
            if isinstance(obj, models.Field):
                # we have a field which also returns a name
                deconstructed = deconstructed[1:]
            path, args, kwargs = deconstructed
            return (
                path,
                [self.deep_deconstruct(value) for value in args],
                {
                    key: self.deep_deconstruct(value)
                    for key, value in kwargs.items()
                },
            )
        else:
            return obj

    def only_relation_agnostic_fields(self, fields):
        """
        Return a definition of the fields that ignores field names and
        what related fields actually relate to. Used for detecting renames (as
        the related fields change during renames).
        """
        fields_def = []
        for name, field in sorted(fields.items()):
            deconstruction = self.deep_deconstruct(field)
            if field.remote_field and field.remote_field.model:
                del deconstruction[2]['to']
            fields_def.append(deconstruction)
        return fields_def

    def _detect_changes(self, convert_apps=None, graph=None):
        """
        Return a dict of migration plans which will achieve the
        change from from_state to to_state. The dict has app labels
        as keys and a list of migrations as values.

        The resulting migrations aren't specially named, but the names
        do matter for dependencies inside the set.

        convert_apps is the list of apps to convert to use migrations
        (i.e. to make initial migrations for, in the usual case)

        graph is an optional argument that, if provided, can help improve
        dependency generation and avoid potential circular dependencies.
        """
        # The first phase is generating all the operations for each app
        # and gathering them into a big per-app list.
        # Then go through that list, order it, and split into migrations to
        # resolve dependencies caused by M2Ms and FKs.
        self.generated_operations = {}
        self.altered_indexes = {}
        self.altered_constraints = {}

        # Prepare some old/new state and model lists, separating
        # proxy models and ignoring unmigrated apps.
        self.old_apps = self.from_state.concrete_apps
        self.new_apps = self.to_state.apps
        self.old_model_keys = set()
        self.old_proxy_keys = set()
        self.old_unmanaged_keys = set()
        self.new_model_keys = set()
        self.new_proxy_keys = set()
        self.new_unmanaged_keys = set()
        for al, mn in self.from_state.models:
            model = self.old_apps.get_model(al, mn)
            if not model._meta.managed:
                self.old_unmanaged_keys.add((al, mn))
            elif al not in self.from_state.real_apps:
                if model._meta.proxy:
                    self.old_proxy_keys.add((al, mn))
                else:
                    self.old_model_keys.add((al, mn))

        for al, mn in self.to_state.models:
            model = self.new_apps.get_model(al, mn)
            if not model._meta.managed:
                self.new_unmanaged_keys.add((al, mn))
            elif (
                al not in self.from_state.real_apps or
                (convert_apps and al in convert_apps)
            ):
                if model._meta.proxy:
                    self.new_proxy_keys.add((al, mn))
                else:
                    self.new_model_keys.add((al, mn))

        # Renames have to come first
        self.generate_renamed_models()

        # Prepare lists of fields and generate through model map
        self._prepare_field_lists()
        self._generate_through_model_map()

        # Generate non-rename model operations
        self.generate_deleted_models()
        self.generate_created_models()
        self.generate_deleted_proxies()
        self.generate_created_proxies()
        self.generate_altered_options()
        self.generate_altered_managers()

        # Create the altered indexes and store them in self.altered_indexes.
        # This avoids the same computation in generate_removed_indexes()
        # and generate_added_indexes().
        self.create_altered_indexes()
        self.create_altered_constraints()
        # Generate index removal operations before field is removed
        self.generate_removed_constraints()
        self.generate_removed_indexes()
        # Generate field operations
        self.generate_renamed_fields()
        self.generate_removed_fields()
        self.generate_added_fields()
        self.generate_altered_fields()
        self.generate_altered_unique_together()
        self.generate_altered_index_together()
        self.generate_added_indexes()
        self.generate_added_constraints()
        self.generate_altered_db_table()
        self.generate_altered_order_with_respect_to()

        self._sort_migrations()
        self._build_migration_list(graph)
        self._optimize_migrations()

        return self.migrations

    def _prepare_field_lists(self):
        """
        Prepare field lists and a list of the fields that used through models
        in the old state so dependencies can be made from the through model
        deletion to the field that uses it.
        """
        self.kept_model_keys = self.old_model_keys & self.new_model_keys
        self.kept_proxy_keys = self.old_proxy_keys & self.new_proxy_keys
        self.kept_unmanaged_keys = self.old_unmanaged_keys & self.new_unmanaged_keys
        self.through_users = {}
        self.old_field_keys = {
            (app_label, model_name, field_name)
            for app_label, model_name in self.kept_model_keys
            for field_name in self.from_state.models[
                app_label,
                self.renamed_models.get((app_label, model_name), model_name)
            ].fields
        }
        self.new_field_keys = {
            (app_label, model_name, field_name)
            for app_label, model_name in self.kept_model_keys
            for field_name in self.to_state.models[app_label, model_name].fields
        }

    def _generate_through_model_map(self):
        """Through model map generation."""
        for app_label, model_name in sorted(self.old_model_keys):
            old_model_name = self.renamed_models.get((app_label, model_name), model_name)
            old_model_state = self.from_state.models[app_label, old_model_name]
            for field_name in old_model_state.fields:
                old_field = self.old_apps.get_model(app_label, old_model_name)._meta.get_field(field_name)
                if (hasattr(old_field, "remote_field") and getattr(old_field.remote_field, "through", None) and
                        not old_field.remote_field.through._meta.auto_created):
                    through_key = (
                        old_field.remote_field.through._meta.app_label,
                        old_field.remote_field.through._meta.model_name,
                    )
                    self.through_users[through_key] = (app_label, old_model_name, field_name)

    @staticmethod
    def _resolve_dependency(dependency):
        """
        Return the resolved dependency and a boolean denoting whether or not
        it was swappable.
        """
        if dependency[0] != '__setting__':
            return dependency, False
        resolved_app_label, resolved_object_name = getattr(settings, dependency[1]).split('.')
        return (resolved_app_label, resolved_object_name.lower()) + dependency[2:], True

    def _build_migration_list(self, graph=None):
        """
        Chop the lists of operations up into migrations with dependencies on
        each other. Do this by going through an app's list of operations until
        one is found that has an outgoing dependency that isn't in another
        app's migration yet (hasn't been chopped off its list). Then chop off
        the operations before it into a migration and move onto the next app.
        If the loop completes without doing anything, there's a circular
        dependency (which _should_ be impossible as the operations are
        all split at this point so they can't depend and be depended on).
        """
        self.migrations = {}
        num_ops = sum(len(x) for x in self.generated_operations.values())
        chop_mode = False
        while num_ops:
            # On every iteration, we step through all the apps and see if there
            # is a completed set of operations.
            # If we find that a subset of the operations are complete we can
            # try to chop it off from the rest and continue, but we only
            # do this if we've already been through the list once before
            # without any chopping and nothing has changed.
            for app_label in sorted(self.generated_operations):
                chopped = []
                dependencies = set()
                for operation in list(self.generated_operations[app_label]):
                    deps_satisfied = True
                    operation_dependencies = set()
                    for dep in operation._auto_deps:
                        # Temporarily resolve the swappable dependency to
                        # prevent circular references. While keeping the
                        # dependency checks on the resolved model, add the
                        # swappable dependencies.
                        original_dep = dep
                        dep, is_swappable_dep = self._resolve_dependency(dep)
                        if dep[0] != app_label:
                            # External app dependency. See if it's not yet
                            # satisfied.
                            for other_operation in self.generated_operations.get(dep[0], []):
                                if self.check_dependency(other_operation, dep):
                                    deps_satisfied = False
                                    break
                            if not deps_satisfied:
                                break
                            else:
                                if is_swappable_dep:
                                    operation_dependencies.add((original_dep[0], original_dep[1]))
                                elif dep[0] in self.migrations:
                                    operation_dependencies.add((dep[0], self.migrations[dep[0]][-1].name))
                                else:
                                    # If we can't find the other app, we add a first/last dependency,
                                    # but only if we've already been through once and checked everything
                                    if chop_mode:
                                        # If the app already exists, we add a dependency on the last migration,
                                        # as we don't know which migration contains the target field.
                                        # If it's not yet migrated or has no migrations, we use __first__
                                        if graph and graph.leaf_nodes(dep[0]):
                                            operation_dependencies.add(graph.leaf_nodes(dep[0])[0])
                                        else:
                                            operation_dependencies.add((dep[0], "__first__"))
                                    else:
                                        deps_satisfied = False
                    if deps_satisfied:
                        chopped.append(operation)
                        dependencies.update(operation_dependencies)
                        del self.generated_operations[app_label][0]
                    else:
                        break
                # Make a migration! Well, only if there's stuff to put in it
                if dependencies or chopped:
                    if not self.generated_operations[app_label] or chop_mode:
                        subclass = type("Migration", (Migration,), {"operations": [], "dependencies": []})
                        instance = subclass("auto_%i" % (len(self.migrations.get(app_label, [])) + 1), app_label)
                        instance.dependencies = list(dependencies)
                        instance.operations = chopped
                        instance.initial = app_label not in self.existing_apps
                        self.migrations.setdefault(app_label, []).append(instance)
                        chop_mode = False
                    else:
                        self.generated_operations[app_label] = chopped + self.generated_operations[app_label]
            new_num_ops = sum(len(x) for x in self.generated_operations.values())
            if new_num_ops == num_ops:
                if not chop_mode:
                    chop_mode = True
                else:
                    raise ValueError("Cannot resolve operation dependencies: %r" % self.generated_operations)
            num_ops = new_num_ops

    def _sort_migrations(self):
        """
        Reorder to make things possible. Reordering may be needed so FKs work
        nicely inside the same app.
        """
        for app_label, ops in sorted(self.generated_operations.items()):
            # construct a dependency graph for intra-app dependencies
            dependency_graph = {op: set() for op in ops}
            for op in ops:
                for dep in op._auto_deps:
                    # Resolve intra-app dependencies to handle circular
                    # references involving a swappable model.
                    dep = self._resolve_dependency(dep)[0]
                    if dep[0] == app_label:
                        for op2 in ops:
                            if self.check_dependency(op2, dep):
                                dependency_graph[op].add(op2)

            # we use a stable sort for deterministic tests & general behavior
            self.generated_operations[app_label] = stable_topological_sort(ops, dependency_graph)

    def _optimize_migrations(self):
        # Add in internal dependencies among the migrations
        for app_label, migrations in self.migrations.items():
            for m1, m2 in zip(migrations, migrations[1:]):
                m2.dependencies.append((app_label, m1.name))

        # De-dupe dependencies
        for migrations in self.migrations.values():
            for migration in migrations:
                migration.dependencies = list(set(migration.dependencies))

        # Optimize migrations
        for app_label, migrations in self.migrations.items():
            for migration in migrations:
                migration.operations = MigrationOptimizer().optimize(migration.operations, app_label)

    def check_dependency(self, operation, dependency):
        """
        Return True if the given operation depends on the given dependency,
        False otherwise.
        """
        # Created model
        if dependency[2] is None and dependency[3] is True:
            return (
                isinstance(operation, operations.CreateModel) and
                operation.name_lower == dependency[1].lower()
            )
        # Created field
        elif dependency[2] is not None and dependency[3] is True:
            return (
                (
                    isinstance(operation, operations.CreateModel) and
                    operation.name_lower == dependency[1].lower() and
                    any(dependency[2] == x for x, y in operation.fields)
                ) or
                (
                    isinstance(operation, operations.AddField) and
                    operation.model_name_lower == dependency[1].lower() and
                    operation.name_lower == dependency[2].lower()
                )
            )
        # Removed field
        elif dependency[2] is not None and dependency[3] is False:
            return (
                isinstance(operation, operations.RemoveField) and
                operation.model_name_lower == dependency[1].lower() and
                operation.name_lower == dependency[2].lower()
            )
        # Removed model
        elif dependency[2] is None and dependency[3] is False:
            return (
                isinstance(operation, operations.DeleteModel) and
                operation.name_lower == dependency[1].lower()
            )
        # Field being altered
        elif dependency[2] is not None and dependency[3] == "alter":
            return (
                isinstance(operation, operations.AlterField) and
                operation.model_name_lower == dependency[1].lower() and
                operation.name_lower == dependency[2].lower()
            )
        # order_with_respect_to being unset for a field
        elif dependency[2] is not None and dependency[3] == "order_wrt_unset":
            return (
                isinstance(operation, operations.AlterOrderWithRespectTo) and
                operation.name_lower == dependency[1].lower() and
                (operation.order_with_respect_to or "").lower() != dependency[2].lower()
            )
        # Field is removed and part of an index/unique_together
        elif dependency[2] is not None and dependency[3] == "foo_together_change":
            return (
                isinstance(operation, (operations.AlterUniqueTogether,
                                       operations.AlterIndexTogether)) and
                operation.name_lower == dependency[1].lower()
            )
        # Unknown dependency. Raise an error.
        else:
            raise ValueError("Can't handle dependency %r" % (dependency,))

    def add_operation(self, app_label, operation, dependencies=None, beginning=False):
        # Dependencies are (app_label, model_name, field_name, create/delete as True/False)
        operation._auto_deps = dependencies or []
        if beginning:
            self.generated_operations.setdefault(app_label, []).insert(0, operation)
        else:
            self.generated_operations.setdefault(app_label, []).append(operation)

    def swappable_first_key(self, item):
        """
        Place potential swappable models first in lists of created models (only
        real way to solve #22783).
        """
        try:
            model = self.new_apps.get_model(item[0], item[1])
            base_names = [base.__name__ for base in model.__bases__]
            string_version = "%s.%s" % (item[0], item[1])
            if (
                model._meta.swappable or
                "AbstractUser" in base_names or
                "AbstractBaseUser" in base_names or
                settings.AUTH_USER_MODEL.lower() == string_version.lower()
            ):
                return ("___" + item[0], "___" + item[1])
        except LookupError:
            pass
        return item

    def generate_renamed_models(self):
        """
        Find any renamed models, generate the operations for them, and remove
        the old entry from the model lists. Must be run before other
        model-level generation.
        """
        self.renamed_models = {}
        self.renamed_models_rel = {}
        added_models = self.new_model_keys - self.old_model_keys
        for app_label, model_name in sorted(added_models):
            model_state = self.to_state.models[app_label, model_name]
            model_fields_def = self.only_relation_agnostic_fields(model_state.fields)

            removed_models = self.old_model_keys - self.new_model_keys
            for rem_app_label, rem_model_name in removed_models:
                if rem_app_label == app_label:
                    rem_model_state = self.from_state.models[rem_app_label, rem_model_name]
                    rem_model_fields_def = self.only_relation_agnostic_fields(rem_model_state.fields)
                    if model_fields_def == rem_model_fields_def:
                        if self.questioner.ask_rename_model(rem_model_state, model_state):
                            model_opts = self.new_apps.get_model(app_label, model_name)._meta
                            dependencies = []
                            for field in model_opts.get_fields():
                                if field.is_relation:
                                    dependencies.extend(self._get_dependencies_for_foreign_key(field))
                            self.add_operation(
                                app_label,
                                operations.RenameModel(
                                    old_name=rem_model_state.name,
                                    new_name=model_state.name,
                                ),
                                dependencies=dependencies,
                            )
                            self.renamed_models[app_label, model_name] = rem_model_name
                            renamed_models_rel_key = '%s.%s' % (
                                rem_model_state.app_label,
                                rem_model_state.name_lower,
                            )
                            self.renamed_models_rel[renamed_models_rel_key] = '%s.%s' % (
                                model_state.app_label,
                                model_state.name_lower,
                            )
                            self.old_model_keys.remove((rem_app_label, rem_model_name))
                            self.old_model_keys.add((app_label, model_name))
                            break

    def generate_created_models(self):
        """
        Find all new models (both managed and unmanaged) and make create
        operations for them as well as separate operations to create any
        foreign key or M2M relationships (these are optimized later, if
        possible).

        Defer any model options that refer to collections of fields that might
        be deferred (e.g. unique_together, index_together).
        """
        old_keys = self.old_model_keys | self.old_unmanaged_keys
        added_models = self.new_model_keys - old_keys
        added_unmanaged_models = self.new_unmanaged_keys - old_keys
        all_added_models = chain(
            sorted(added_models, key=self.swappable_first_key, reverse=True),
            sorted(added_unmanaged_models, key=self.swappable_first_key, reverse=True)
        )
        for app_label, model_name in all_added_models:
            model_state = self.to_state.models[app_label, model_name]
            model_opts = self.new_apps.get_model(app_label, model_name)._meta
            # Gather related fields
            related_fields = {}
            primary_key_rel = None
            for field in model_opts.local_fields:
                if field.remote_field:
                    if field.remote_field.model:
                        if field.primary_key:
                            primary_key_rel = field.remote_field.model
                        elif not field.remote_field.parent_link:
                            related_fields[field.name] = field
                    # through will be none on M2Ms on swapped-out models;
                    # we can treat lack of through as auto_created=True, though.
                    if (getattr(field.remote_field, "through", None) and
                            not field.remote_field.through._meta.auto_created):
                        related_fields[field.name] = field
            for field in model_opts.local_many_to_many:
                if field.remote_field.model:
                    related_fields[field.name] = field
                if getattr(field.remote_field, "through", None) and not field.remote_field.through._meta.auto_created:
                    related_fields[field.name] = field
            # Are there indexes/unique|index_together to defer?
            indexes = model_state.options.pop('indexes')
            constraints = model_state.options.pop('constraints')
            unique_together = model_state.options.pop('unique_together', None)
            index_together = model_state.options.pop('index_together', None)
            order_with_respect_to = model_state.options.pop('order_with_respect_to', None)
            # Depend on the deletion of any possible proxy version of us
            dependencies = [
                (app_label, model_name, None, False),
            ]
            # Depend on all bases
            for base in model_state.bases:
                if isinstance(base, str) and "." in base:
                    base_app_label, base_name = base.split(".", 1)
                    dependencies.append((base_app_label, base_name, None, True))
            # Depend on the other end of the primary key if it's a relation
            if primary_key_rel:
                dependencies.append((
                    primary_key_rel._meta.app_label,
                    primary_key_rel._meta.object_name,
                    None,
                    True
                ))
            # Generate creation operation
            self.add_operation(
                app_label,
                operations.CreateModel(
                    name=model_state.name,
                    fields=[d for d in model_state.fields.items() if d[0] not in related_fields],
                    options=model_state.options,
                    bases=model_state.bases,
                    managers=model_state.managers,
                ),
                dependencies=dependencies,
                beginning=True,
            )

            # Don't add operations which modify the database for unmanaged models
            if not model_opts.managed:
                continue

            # Generate operations for each related field
            for name, field in sorted(related_fields.items()):
                dependencies = self._get_dependencies_for_foreign_key(field)
                # Depend on our own model being created
                dependencies.append((app_label, model_name, None, True))
                # Make operation
                self.add_operation(
                    app_label,
                    operations.AddField(
                        model_name=model_name,
                        name=name,
                        field=field,
                    ),
                    dependencies=list(set(dependencies)),
                )
            # Generate other operations
            related_dependencies = [
                (app_label, model_name, name, True)
                for name in sorted(related_fields)
            ]
            related_dependencies.append((app_label, model_name, None, True))
            for index in indexes:
                self.add_operation(
                    app_label,
                    operations.AddIndex(
                        model_name=model_name,
                        index=index,
                    ),
                    dependencies=related_dependencies,
                )
            for constraint in constraints:
                self.add_operation(
                    app_label,
                    operations.AddConstraint(
                        model_name=model_name,
                        constraint=constraint,
                    ),
                    dependencies=related_dependencies,
                )
            if unique_together:
                self.add_operation(
                    app_label,
                    operations.AlterUniqueTogether(
                        name=model_name,
                        unique_together=unique_together,
                    ),
                    dependencies=related_dependencies
                )
            if index_together:
                self.add_operation(
                    app_label,
                    operations.AlterIndexTogether(
                        name=model_name,
                        index_together=index_together,
                    ),
                    dependencies=related_dependencies
                )
            if order_with_respect_to:
                self.add_operation(
                    app_label,
                    operations.AlterOrderWithRespectTo(
                        name=model_name,
                        order_with_respect_to=order_with_respect_to,
                    ),
                    dependencies=[
                        (app_label, model_name, order_with_respect_to, True),
                        (app_label, model_name, None, True),
                    ]
                )

            # Fix relationships if the model changed from a proxy model to a
            # concrete model.
            if (app_label, model_name) in self.old_proxy_keys:
                for related_object in model_opts.related_objects:
                    self.add_operation(
                        related_object.related_model._meta.app_label,
                        operations.AlterField(
                            model_name=related_object.related_model._meta.object_name,
                            name=related_object.field.name,
                            field=related_object.field,
                        ),
                        dependencies=[(app_label, model_name, None, True)],
                    )

    def generate_created_proxies(self):
        """
        Make CreateModel statements for proxy models. Use the same statements
        as for concrete models so there's less code duplication, but for
        proxy models it's safe to skip all the pointless field stuff and
        chuck out an operation.
        """
        added = self.new_proxy_keys - self.old_proxy_keys
        for app_label, model_name in sorted(added):
            model_state = self.to_state.models[app_label, model_name]
            assert model_state.options.get("proxy")

            # Depend on the deletion of any possible non-proxy version of us
            dependencies = [
                (app_label, model_name, None, False),
            ]
            # Depend on all bases
            for base in model_state.bases:
                if isinstance(base, str) and "." in base:
                    base_app_label, base_name = base.split(".", 1)
                    dependencies.append((base_app_label, base_name, None, True))
            # Generate creation operation
            self.add_operation(
                app_label,
                operations.CreateModel(
                    name=model_state.name,
                    fields=[],
                    options=model_state.options,
                    bases=model_state.bases,
                    managers=model_state.managers,
                ),
                # Depend on the deletion of any possible non-proxy version of us
                dependencies=dependencies,
            )

    def generate_deleted_models(self):
        """
        Find all deleted models (managed and unmanaged) and make delete
        operations for them as well as separate operations to delete any
        foreign key or M2M relationships (these are optimized later, if
        possible).

        Also bring forward removal of any model options that refer to
        collections of fields - the inverse of generate_created_models().
        """
        new_keys = self.new_model_keys | self.new_unmanaged_keys
        deleted_models = self.old_model_keys - new_keys
        deleted_unmanaged_models = self.old_unmanaged_keys - new_keys
        all_deleted_models = chain(sorted(deleted_models), sorted(deleted_unmanaged_models))
        for app_label, model_name in all_deleted_models:
            model_state = self.from_state.models[app_label, model_name]
            model = self.old_apps.get_model(app_label, model_name)
            # Gather related fields
            related_fields = {}
            for field in model._meta.local_fields:
                if field.remote_field:
                    if field.remote_field.model:
                        related_fields[field.name] = field
                    # through will be none on M2Ms on swapped-out models;
                    # we can treat lack of through as auto_created=True, though.
                    if (getattr(field.remote_field, "through", None) and
                            not field.remote_field.through._meta.auto_created):
                        related_fields[field.name] = field
            for field in model._meta.local_many_to_many:
                if field.remote_field.model:
                    related_fields[field.name] = field
                if getattr(field.remote_field, "through", None) and not field.remote_field.through._meta.auto_created:
                    related_fields[field.name] = field
            # Generate option removal first
            unique_together = model_state.options.pop('unique_together', None)
            index_together = model_state.options.pop('index_together', None)
            if unique_together:
                self.add_operation(
                    app_label,
                    operations.AlterUniqueTogether(
                        name=model_name,
                        unique_together=None,
                    )
                )
            if index_together:
                self.add_operation(
                    app_label,
                    operations.AlterIndexTogether(
                        name=model_name,
                        index_together=None,
                    )
                )
            # Then remove each related field
            for name in sorted(related_fields):
                self.add_operation(
                    app_label,
                    operations.RemoveField(
                        model_name=model_name,
                        name=name,
                    )
                )
            # Finally, remove the model.
            # This depends on both the removal/alteration of all incoming fields
            # and the removal of all its own related fields, and if it's
            # a through model the field that references it.
            dependencies = []
            for related_object in model._meta.related_objects:
                related_object_app_label = related_object.related_model._meta.app_label
                object_name = related_object.related_model._meta.object_name
                field_name = related_object.field.name
                dependencies.append((related_object_app_label, object_name, field_name, False))
                if not related_object.many_to_many:
                    dependencies.append((related_object_app_label, object_name, field_name, "alter"))

            for name in sorted(related_fields):
                dependencies.append((app_label, model_name, name, False))
            # We're referenced in another field's through=
            through_user = self.through_users.get((app_label, model_state.name_lower))
            if through_user:
                dependencies.append((through_user[0], through_user[1], through_user[2], False))
            # Finally, make the operation, deduping any dependencies
            self.add_operation(
                app_label,
                operations.DeleteModel(
                    name=model_state.name,
                ),
                dependencies=list(set(dependencies)),
            )

    def generate_deleted_proxies(self):
        """Make DeleteModel operations for proxy models."""
        deleted = self.old_proxy_keys - self.new_proxy_keys
        for app_label, model_name in sorted(deleted):
            model_state = self.from_state.models[app_label, model_name]
            assert model_state.options.get("proxy")
            self.add_operation(
                app_label,
                operations.DeleteModel(
                    name=model_state.name,
                ),
            )

    def generate_renamed_fields(self):
        """Work out renamed fields."""
        self.renamed_fields = {}
        for app_label, model_name, field_name in sorted(self.new_field_keys - self.old_field_keys):
            old_model_name = self.renamed_models.get((app_label, model_name), model_name)
            old_model_state = self.from_state.models[app_label, old_model_name]
            field = self.new_apps.get_model(app_label, model_name)._meta.get_field(field_name)
            # Scan to see if this is actually a rename!
            field_dec = self.deep_deconstruct(field)
            for rem_app_label, rem_model_name, rem_field_name in sorted(self.old_field_keys - self.new_field_keys):
                if rem_app_label == app_label and rem_model_name == model_name:
                    old_field = old_model_state.fields[rem_field_name]
                    old_field_dec = self.deep_deconstruct(old_field)
                    if field.remote_field and field.remote_field.model and 'to' in old_field_dec[2]:
                        old_rel_to = old_field_dec[2]['to']
                        if old_rel_to in self.renamed_models_rel:
                            old_field_dec[2]['to'] = self.renamed_models_rel[old_rel_to]
                    old_field.set_attributes_from_name(rem_field_name)
                    old_db_column = old_field.get_attname_column()[1]
                    if (old_field_dec == field_dec or (
                            # Was the field renamed and db_column equal to the
                            # old field's column added?
                            old_field_dec[0:2] == field_dec[0:2] and
                            dict(old_field_dec[2], db_column=old_db_column) == field_dec[2])):
                        if self.questioner.ask_rename(model_name, rem_field_name, field_name, field):
                            self.add_operation(
                                app_label,
                                operations.RenameField(
                                    model_name=model_name,
                                    old_name=rem_field_name,
                                    new_name=field_name,
                                )
                            )
                            self.old_field_keys.remove((rem_app_label, rem_model_name, rem_field_name))
                            self.old_field_keys.add((app_label, model_name, field_name))
                            self.renamed_fields[app_label, model_name, field_name] = rem_field_name
                            break

    def generate_added_fields(self):
        """Make AddField operations."""
        for app_label, model_name, field_name in sorted(self.new_field_keys - self.old_field_keys):
            self._generate_added_field(app_label, model_name, field_name)

    def _generate_added_field(self, app_label, model_name, field_name):
        field = self.new_apps.get_model(app_label, model_name)._meta.get_field(field_name)
        # Fields that are foreignkeys/m2ms depend on stuff
        dependencies = []
        if field.remote_field and field.remote_field.model:
            dependencies.extend(self._get_dependencies_for_foreign_key(field))
        # You can't just add NOT NULL fields with no default or fields
        # which don't allow empty strings as default.
        time_fields = (models.DateField, models.DateTimeField, models.TimeField)
        preserve_default = (
            field.null or field.has_default() or field.many_to_many or
            (field.blank and field.empty_strings_allowed) or
            (isinstance(field, time_fields) and field.auto_now)
        )
        if not preserve_default:
            field = field.clone()
            if isinstance(field, time_fields) and field.auto_now_add:
                field.default = self.questioner.ask_auto_now_add_addition(field_name, model_name)
            else:
                field.default = self.questioner.ask_not_null_addition(field_name, model_name)
        self.add_operation(
            app_label,
            operations.AddField(
                model_name=model_name,
                name=field_name,
                field=field,
                preserve_default=preserve_default,
            ),
            dependencies=dependencies,
        )

    def generate_removed_fields(self):
        """Make RemoveField operations."""
        for app_label, model_name, field_name in sorted(self.old_field_keys - self.new_field_keys):
            self._generate_removed_field(app_label, model_name, field_name)

    def _generate_removed_field(self, app_label, model_name, field_name):
        self.add_operation(
            app_label,
            operations.RemoveField(
                model_name=model_name,
                name=field_name,
            ),
            # We might need to depend on the removal of an
            # order_with_respect_to or index/unique_together operation;
            # this is safely ignored if there isn't one
            dependencies=[
                (app_label, model_name, field_name, "order_wrt_unset"),
                (app_label, model_name, field_name, "foo_together_change"),
            ],
        )

    def generate_altered_fields(self):
        """
        Make AlterField operations, or possibly RemovedField/AddField if alter
        isn't possible.
        """
        for app_label, model_name, field_name in sorted(self.old_field_keys & self.new_field_keys):
            # Did the field change?
            old_model_name = self.renamed_models.get((app_label, model_name), model_name)
            old_field_name = self.renamed_fields.get((app_label, model_name, field_name), field_name)
            old_field = self.old_apps.get_model(app_label, old_model_name)._meta.get_field(old_field_name)
            new_field = self.new_apps.get_model(app_label, model_name)._meta.get_field(field_name)
            dependencies = []
            # Implement any model renames on relations; these are handled by RenameModel
            # so we need to exclude them from the comparison
            if hasattr(new_field, "remote_field") and getattr(new_field.remote_field, "model", None):
                rename_key = (
                    new_field.remote_field.model._meta.app_label,
                    new_field.remote_field.model._meta.model_name,
                )
                if rename_key in self.renamed_models:
                    new_field.remote_field.model = old_field.remote_field.model
                # Handle ForeignKey which can only have a single to_field.
                remote_field_name = getattr(new_field.remote_field, 'field_name', None)
                if remote_field_name:
                    to_field_rename_key = rename_key + (remote_field_name,)
                    if to_field_rename_key in self.renamed_fields:
                        # Repoint both model and field name because to_field
                        # inclusion in ForeignKey.deconstruct() is based on
                        # both.
                        new_field.remote_field.model = old_field.remote_field.model
                        new_field.remote_field.field_name = old_field.remote_field.field_name
                # Handle ForeignObjects which can have multiple from_fields/to_fields.
                from_fields = getattr(new_field, 'from_fields', None)
                if from_fields:
                    from_rename_key = (app_label, model_name)
                    new_field.from_fields = tuple([
                        self.renamed_fields.get(from_rename_key + (from_field,), from_field)
                        for from_field in from_fields
                    ])
                    new_field.to_fields = tuple([
                        self.renamed_fields.get(rename_key + (to_field,), to_field)
                        for to_field in new_field.to_fields
                    ])
                dependencies.extend(self._get_dependencies_for_foreign_key(new_field))
            if hasattr(new_field, "remote_field") and getattr(new_field.remote_field, "through", None):
                rename_key = (
                    new_field.remote_field.through._meta.app_label,
                    new_field.remote_field.through._meta.model_name,
                )
                if rename_key in self.renamed_models:
                    new_field.remote_field.through = old_field.remote_field.through
            old_field_dec = self.deep_deconstruct(old_field)
            new_field_dec = self.deep_deconstruct(new_field)
            if old_field_dec != new_field_dec:
                both_m2m = old_field.many_to_many and new_field.many_to_many
                neither_m2m = not old_field.many_to_many and not new_field.many_to_many
                if both_m2m or neither_m2m:
                    # Either both fields are m2m or neither is
                    preserve_default = True
                    if (old_field.null and not new_field.null and not new_field.has_default() and
                            not new_field.many_to_many):
                        field = new_field.clone()
                        new_default = self.questioner.ask_not_null_alteration(field_name, model_name)
                        if new_default is not models.NOT_PROVIDED:
                            field.default = new_default
                            preserve_default = False
                    else:
                        field = new_field
                    self.add_operation(
                        app_label,
                        operations.AlterField(
                            model_name=model_name,
                            name=field_name,
                            field=field,
                            preserve_default=preserve_default,
                        ),
                        dependencies=dependencies,
                    )
                else:
                    # We cannot alter between m2m and concrete fields
                    self._generate_removed_field(app_label, model_name, field_name)
                    self._generate_added_field(app_label, model_name, field_name)

    def create_altered_indexes(self):
        option_name = operations.AddIndex.option_name
        for app_label, model_name in sorted(self.kept_model_keys):
            old_model_name = self.renamed_models.get((app_label, model_name), model_name)
            old_model_state = self.from_state.models[app_label, old_model_name]
            new_model_state = self.to_state.models[app_label, model_name]

            old_indexes = old_model_state.options[option_name]
            new_indexes = new_model_state.options[option_name]
            add_idx = [idx for idx in new_indexes if idx not in old_indexes]
            rem_idx = [idx for idx in old_indexes if idx not in new_indexes]

            self.altered_indexes.update({
                (app_label, model_name): {
                    'added_indexes': add_idx, 'removed_indexes': rem_idx,
                }
            })

    def generate_added_indexes(self):
        for (app_label, model_name), alt_indexes in self.altered_indexes.items():
            for index in alt_indexes['added_indexes']:
                self.add_operation(
                    app_label,
                    operations.AddIndex(
                        model_name=model_name,
                        index=index,
                    )
                )

    def generate_removed_indexes(self):
        for (app_label, model_name), alt_indexes in self.altered_indexes.items():
            for index in alt_indexes['removed_indexes']:
                self.add_operation(
                    app_label,
                    operations.RemoveIndex(
                        model_name=model_name,
                        name=index.name,
                    )
                )

    def create_altered_constraints(self):
        option_name = operations.AddConstraint.option_name
        for app_label, model_name in sorted(self.kept_model_keys):
            old_model_name = self.renamed_models.get((app_label, model_name), model_name)
            old_model_state = self.from_state.models[app_label, old_model_name]
            new_model_state = self.to_state.models[app_label, model_name]

            old_constraints = old_model_state.options[option_name]
            new_constraints = new_model_state.options[option_name]
            add_constraints = [c for c in new_constraints if c not in old_constraints]
            rem_constraints = [c for c in old_constraints if c not in new_constraints]

            self.altered_constraints.update({
                (app_label, model_name): {
                    'added_constraints': add_constraints, 'removed_constraints': rem_constraints,
                }
            })

    def generate_added_constraints(self):
        for (app_label, model_name), alt_constraints in self.altered_constraints.items():
            for constraint in alt_constraints['added_constraints']:
                self.add_operation(
                    app_label,
                    operations.AddConstraint(
                        model_name=model_name,
                        constraint=constraint,
                    )
                )

    def generate_removed_constraints(self):
        for (app_label, model_name), alt_constraints in self.altered_constraints.items():
            for constraint in alt_constraints['removed_constraints']:
                self.add_operation(
                    app_label,
                    operations.RemoveConstraint(
                        model_name=model_name,
                        name=constraint.name,
                    )
                )

    def _get_dependencies_for_foreign_key(self, field):
        # Account for FKs to swappable models
        swappable_setting = getattr(field, 'swappable_setting', None)
        if swappable_setting is not None:
            dep_app_label = "__setting__"
            dep_object_name = swappable_setting
        else:
            dep_app_label = field.remote_field.model._meta.app_label
            dep_object_name = field.remote_field.model._meta.object_name
        dependencies = [(dep_app_label, dep_object_name, None, True)]
        if getattr(field.remote_field, "through", None) and not field.remote_field.through._meta.auto_created:
            dependencies.append((
                field.remote_field.through._meta.app_label,
                field.remote_field.through._meta.object_name,
                None,
                True,
            ))
        return dependencies

    def _generate_altered_foo_together(self, operation):
        option_name = operation.option_name
        for app_label, model_name in sorted(self.kept_model_keys):
            old_model_name = self.renamed_models.get((app_label, model_name), model_name)
            old_model_state = self.from_state.models[app_label, old_model_name]
            new_model_state = self.to_state.models[app_label, model_name]

            # We run the old version through the field renames to account for those
            old_value = old_model_state.options.get(option_name)
            old_value = {
                tuple(
                    self.renamed_fields.get((app_label, model_name, n), n)
                    for n in unique
                )
                for unique in old_value
            } if old_value else set()

            new_value = new_model_state.options.get(option_name)
            new_value = set(new_value) if new_value else set()

            if old_value != new_value:
                dependencies = []
                for foo_togethers in new_value:
                    for field_name in foo_togethers:
                        field = self.new_apps.get_model(app_label, model_name)._meta.get_field(field_name)
                        if field.remote_field and field.remote_field.model:
                            dependencies.extend(self._get_dependencies_for_foreign_key(field))

                self.add_operation(
                    app_label,
                    operation(
                        name=model_name,
                        **{option_name: new_value}
                    ),
                    dependencies=dependencies,
                )

    def generate_altered_unique_together(self):
        self._generate_altered_foo_together(operations.AlterUniqueTogether)

    def generate_altered_index_together(self):
        self._generate_altered_foo_together(operations.AlterIndexTogether)

    def generate_altered_db_table(self):
        models_to_check = self.kept_model_keys.union(self.kept_proxy_keys, self.kept_unmanaged_keys)
        for app_label, model_name in sorted(models_to_check):
            old_model_name = self.renamed_models.get((app_label, model_name), model_name)
            old_model_state = self.from_state.models[app_label, old_model_name]
            new_model_state = self.to_state.models[app_label, model_name]
            old_db_table_name = old_model_state.options.get('db_table')
            new_db_table_name = new_model_state.options.get('db_table')
            if old_db_table_name != new_db_table_name:
                self.add_operation(
                    app_label,
                    operations.AlterModelTable(
                        name=model_name,
                        table=new_db_table_name,
                    )
                )

    def generate_altered_options(self):
        """
        Work out if any non-schema-affecting options have changed and make an
        operation to represent them in state changes (in case Python code in
        migrations needs them).
        """
        models_to_check = self.kept_model_keys.union(
            self.kept_proxy_keys,
            self.kept_unmanaged_keys,
            # unmanaged converted to managed
            self.old_unmanaged_keys & self.new_model_keys,
            # managed converted to unmanaged
            self.old_model_keys & self.new_unmanaged_keys,
        )

        for app_label, model_name in sorted(models_to_check):
            old_model_name = self.renamed_models.get((app_label, model_name), model_name)
            old_model_state = self.from_state.models[app_label, old_model_name]
            new_model_state = self.to_state.models[app_label, model_name]
            old_options = {
                key: value for key, value in old_model_state.options.items()
                if key in AlterModelOptions.ALTER_OPTION_KEYS
            }
            new_options = {
                key: value for key, value in new_model_state.options.items()
                if key in AlterModelOptions.ALTER_OPTION_KEYS
            }
            if old_options != new_options:
                self.add_operation(
                    app_label,
                    operations.AlterModelOptions(
                        name=model_name,
                        options=new_options,
                    )
                )

    def generate_altered_order_with_respect_to(self):
        for app_label, model_name in sorted(self.kept_model_keys):
            old_model_name = self.renamed_models.get((app_label, model_name), model_name)
            old_model_state = self.from_state.models[app_label, old_model_name]
            new_model_state = self.to_state.models[app_label, model_name]
            if (old_model_state.options.get("order_with_respect_to") !=
                    new_model_state.options.get("order_with_respect_to")):
                # Make sure it comes second if we're adding
                # (removal dependency is part of RemoveField)
                dependencies = []
                if new_model_state.options.get("order_with_respect_to"):
                    dependencies.append((
                        app_label,
                        model_name,
                        new_model_state.options["order_with_respect_to"],
                        True,
                    ))
                # Actually generate the operation
                self.add_operation(
                    app_label,
                    operations.AlterOrderWithRespectTo(
                        name=model_name,
                        order_with_respect_to=new_model_state.options.get('order_with_respect_to'),
                    ),
                    dependencies=dependencies,
                )

    def generate_altered_managers(self):
        for app_label, model_name in sorted(self.kept_model_keys):
            old_model_name = self.renamed_models.get((app_label, model_name), model_name)
            old_model_state = self.from_state.models[app_label, old_model_name]
            new_model_state = self.to_state.models[app_label, model_name]
            if old_model_state.managers != new_model_state.managers:
                self.add_operation(
                    app_label,
                    operations.AlterModelManagers(
                        name=model_name,
                        managers=new_model_state.managers,
                    )
                )

    def arrange_for_graph(self, changes, graph, migration_name=None):
        """
        Take a result from changes() and a MigrationGraph, and fix the names
        and dependencies of the changes so they extend the graph from the leaf
        nodes for each app.
        """
        leaves = graph.leaf_nodes()
        name_map = {}
        for app_label, migrations in list(changes.items()):
            if not migrations:
                continue
            # Find the app label's current leaf node
            app_leaf = None
            for leaf in leaves:
                if leaf[0] == app_label:
                    app_leaf = leaf
                    break
            # Do they want an initial migration for this app?
            if app_leaf is None and not self.questioner.ask_initial(app_label):
                # They don't.
                for migration in migrations:
                    name_map[(app_label, migration.name)] = (app_label, "__first__")
                del changes[app_label]
                continue
            # Work out the next number in the sequence
            if app_leaf is None:
                next_number = 1
            else:
                next_number = (self.parse_number(app_leaf[1]) or 0) + 1
            # Name each migration
            for i, migration in enumerate(migrations):
                if i == 0 and app_leaf:
                    migration.dependencies.append(app_leaf)
                if i == 0 and not app_leaf:
                    new_name = "0001_%s" % migration_name if migration_name else "0001_initial"
                else:
                    new_name = "%04i_%s" % (
                        next_number,
                        migration_name or self.suggest_name(migration.operations)[:100],
                    )
                name_map[(app_label, migration.name)] = (app_label, new_name)
                next_number += 1
                migration.name = new_name
        # Now fix dependencies
        for migrations in changes.values():
            for migration in migrations:
                migration.dependencies = [name_map.get(d, d) for d in migration.dependencies]
        return changes

    def _trim_to_apps(self, changes, app_labels):
        """
        Take changes from arrange_for_graph() and set of app labels, and return
        a modified set of changes which trims out as many migrations that are
        not in app_labels as possible. Note that some other migrations may
        still be present as they may be required dependencies.
        """
        # Gather other app dependencies in a first pass
        app_dependencies = {}
        for app_label, migrations in changes.items():
            for migration in migrations:
                for dep_app_label, name in migration.dependencies:
                    app_dependencies.setdefault(app_label, set()).add(dep_app_label)
        required_apps = set(app_labels)
        # Keep resolving till there's no change
        old_required_apps = None
        while old_required_apps != required_apps:
            old_required_apps = set(required_apps)
            required_apps.update(*[app_dependencies.get(app_label, ()) for app_label in required_apps])
        # Remove all migrations that aren't needed
        for app_label in list(changes):
            if app_label not in required_apps:
                del changes[app_label]
        return changes

    @classmethod
    def suggest_name(cls, ops):
        """
        Given a set of operations, suggest a name for the migration they might
        represent. Names are not guaranteed to be unique, but put some effort
        into the fallback name to avoid VCS conflicts if possible.
        """
        if len(ops) == 1:
            if isinstance(ops[0], operations.CreateModel):
                return ops[0].name_lower
            elif isinstance(ops[0], operations.DeleteModel):
                return "delete_%s" % ops[0].name_lower
            elif isinstance(ops[0], operations.AddField):
                return "%s_%s" % (ops[0].model_name_lower, ops[0].name_lower)
            elif isinstance(ops[0], operations.RemoveField):
                return "remove_%s_%s" % (ops[0].model_name_lower, ops[0].name_lower)
        elif ops:
            if all(isinstance(o, operations.CreateModel) for o in ops):
                return "_".join(sorted(o.name_lower for o in ops))
        return "auto_%s" % get_migration_name_timestamp()

    @classmethod
    def parse_number(cls, name):
        """
        Given a migration name, try to extract a number from the beginning of
        it. If no number is found, return None.
        """
        match = re.match(r'^\d+', name)
        if match:
            return int(match.group())
        return None
64e58d3f96c28ec57d4292f9e2ad238622a1772d8e7c4441e7e31610c46b9a41
from django.apps.registry import Apps
from django.db import DatabaseError, models
from django.utils.functional import classproperty
from django.utils.timezone import now

from .exceptions import MigrationSchemaMissing


class MigrationRecorder:
    """
    Deal with storing migration records in the database.

    Because this table is actually itself used for dealing with model
    creation, it's the one thing we can't do normally via migrations.
    We manually handle table creation/schema updating (using schema backend)
    and then have a floating model to do queries with.

    If a migration is unapplied its row is removed from the table. Having
    a row in the table always means a migration is applied.
    """
    _migration_class = None

    @classproperty
    def Migration(cls):
        """
        Lazy load to avoid AppRegistryNotReady if installed apps import
        MigrationRecorder.
        """
        if cls._migration_class is None:
            class Migration(models.Model):
                app = models.CharField(max_length=255)
                name = models.CharField(max_length=255)
                applied = models.DateTimeField(default=now)

                class Meta:
                    apps = Apps()
                    app_label = 'migrations'
                    db_table = 'django_migrations'

                def __str__(self):
                    return 'Migration %s for %s' % (self.name, self.app)

            cls._migration_class = Migration
        return cls._migration_class

    def __init__(self, connection):
        self.connection = connection

    @property
    def migration_qs(self):
        return self.Migration.objects.using(self.connection.alias)

    def has_table(self):
        """Return True if the django_migrations table exists."""
        with self.connection.cursor() as cursor:
            tables = self.connection.introspection.table_names(cursor)
        return self.Migration._meta.db_table in tables

    def ensure_schema(self):
        """Ensure the table exists and has the correct schema."""
        # If the table's there, that's fine - we've never changed its schema
        # in the codebase.
        if self.has_table():
            return
        # Make the table
        try:
            with self.connection.schema_editor() as editor:
                editor.create_model(self.Migration)
        except DatabaseError as exc:
            raise MigrationSchemaMissing("Unable to create the django_migrations table (%s)" % exc)

    def applied_migrations(self):
        """
        Return a dict mapping (app_name, migration_name) to Migration
        instances for all applied migrations.
        """
        if self.has_table():
            return {(migration.app, migration.name): migration for migration in self.migration_qs}
        else:
            # If the django_migrations table doesn't exist, then no migrations
            # are applied.
            return {}

    def record_applied(self, app, name):
        """Record that a migration was applied."""
        self.ensure_schema()
        self.migration_qs.create(app=app, name=name)

    def record_unapplied(self, app, name):
        """Record that a migration was unapplied."""
        self.ensure_schema()
        self.migration_qs.filter(app=app, name=name).delete()

    def flush(self):
        """Delete all migration records. Useful for testing migrations."""
        self.migration_qs.all().delete()
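
# Usage sketch (the calls are this module's own API; the app and migration
# names are placeholders):
#
#     from django.db import connection
#
#     recorder = MigrationRecorder(connection)
#     if ('myapp', '0001_initial') not in recorder.applied_migrations():
#         recorder.record_applied('myapp', '0001_initial')
#
# ensure_schema() runs implicitly inside record_applied(), so the
# django_migrations table is created on first use if it doesn't exist.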
f6d6aa66ce6224b5e782da60a4dfc32ce4fc87ad5b8a614668fe3ac8a8cbffda
class MigrationOptimizer:
    """
    Power the optimization process, where you provide a list of Operations
    and you are returned a list of equal or shorter length - operations
    are merged into one if possible.

    For example, a CreateModel and an AddField can be optimized into a
    new CreateModel, and CreateModel and DeleteModel can be optimized into
    nothing.
    """

    def optimize(self, operations, app_label):
        """
        Main optimization entry point. Pass in a list of Operation instances,
        get out a new list of Operation instances.

        Unfortunately, due to the scope of the optimization (two combinable
        operations might be separated by several hundred others), this can't be
        done as a peephole optimization with checks/output implemented on
        the Operations themselves; instead, the optimizer looks at each
        individual operation and scans forwards in the list to see if there
        are any matches, stopping at boundaries - operations which can't
        be optimized over (RunSQL, operations on the same field/model, etc.)

        The inner loop is run until the starting list is the same as the result
        list, and then the result is returned. This means that operation
        optimization must be stable and always return an equal or shorter list.
        """
        if app_label is None:
            raise TypeError('app_label must be a str.')
        # Internal tracking variable for test assertions about # of loops.
        self._iterations = 0
        while True:
            result = self.optimize_inner(operations, app_label)
            self._iterations += 1
            if result == operations:
                return result
            operations = result

    def optimize_inner(self, operations, app_label):
        """Inner optimization loop."""
        new_operations = []
        for i, operation in enumerate(operations):
            right = True  # Should we reduce on the right or on the left.
            # Compare it to each operation after it
            for j, other in enumerate(operations[i + 1:]):
                result = operation.reduce(other, app_label)
                if isinstance(result, list):
                    in_between = operations[i + 1:i + j + 1]
                    if right:
                        new_operations.extend(in_between)
                        new_operations.extend(result)
                    elif all(op.reduce(other, app_label) is True for op in in_between):
                        # Perform a left reduction if all of the in-between
                        # operations can optimize through other.
                        new_operations.extend(result)
                        new_operations.extend(in_between)
                    else:
                        # Otherwise keep trying.
                        new_operations.append(operation)
                        break
                    new_operations.extend(operations[i + j + 2:])
                    return new_operations
                elif not result:
                    # Can't perform a right reduction.
                    right = False
            else:
                new_operations.append(operation)
        return new_operations
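
# Usage sketch illustrating the CreateModel + AddField reduction mentioned in
# the class docstring (the operations list and app label are hypothetical):
#
#     from django.db import migrations, models
#
#     ops = [
#         migrations.CreateModel('Author', [('id', models.AutoField(primary_key=True))]),
#         migrations.AddField('author', 'name', models.CharField(max_length=100)),
#     ]
#     optimized = MigrationOptimizer().optimize(ops, app_label='books')
#     # -> a single CreateModel containing both fields.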
b42e5c4534cc8759c66f4b041703a709223af7178caa12f258c0c99988b4a10c
""" The main QuerySet implementation. This provides the public API for the ORM. """ import copy import operator import warnings from collections import namedtuple from functools import lru_cache from itertools import chain from django.conf import settings from django.core import exceptions from django.db import ( DJANGO_VERSION_PICKLE_KEY, IntegrityError, NotSupportedError, connections, router, transaction, ) from django.db.models import AutoField, DateField, DateTimeField, sql from django.db.models.constants import LOOKUP_SEP from django.db.models.deletion import Collector from django.db.models.expressions import Case, Expression, F, Value, When from django.db.models.functions import Cast, Trunc from django.db.models.query_utils import FilteredRelation, Q from django.db.models.sql.constants import CURSOR, GET_ITERATOR_CHUNK_SIZE from django.db.models.utils import resolve_callables from django.utils import timezone from django.utils.functional import cached_property, partition from django.utils.version import get_version # The maximum number of results to fetch in a get() query. MAX_GET_RESULTS = 21 # The maximum number of items to display in a QuerySet.__repr__ REPR_OUTPUT_SIZE = 20 class BaseIterable: def __init__(self, queryset, chunked_fetch=False, chunk_size=GET_ITERATOR_CHUNK_SIZE): self.queryset = queryset self.chunked_fetch = chunked_fetch self.chunk_size = chunk_size class ModelIterable(BaseIterable): """Iterable that yields a model instance for each row.""" def __iter__(self): queryset = self.queryset db = queryset.db compiler = queryset.query.get_compiler(using=db) # Execute the query. This will also fill compiler.select, klass_info, # and annotations. results = compiler.execute_sql(chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size) select, klass_info, annotation_col_map = (compiler.select, compiler.klass_info, compiler.annotation_col_map) model_cls = klass_info['model'] select_fields = klass_info['select_fields'] model_fields_start, model_fields_end = select_fields[0], select_fields[-1] + 1 init_list = [f[0].target.attname for f in select[model_fields_start:model_fields_end]] related_populators = get_related_populators(klass_info, select, db) known_related_objects = [ (field, related_objs, operator.attrgetter(*[ field.attname if from_field == 'self' else queryset.model._meta.get_field(from_field).attname for from_field in field.from_fields ])) for field, related_objs in queryset._known_related_objects.items() ] for row in compiler.results_iter(results): obj = model_cls.from_db(db, init_list, row[model_fields_start:model_fields_end]) for rel_populator in related_populators: rel_populator.populate(row, obj) if annotation_col_map: for attr_name, col_pos in annotation_col_map.items(): setattr(obj, attr_name, row[col_pos]) # Add the known related objects to the model. for field, rel_objs, rel_getter in known_related_objects: # Avoid overwriting objects loaded by, e.g., select_related(). if field.is_cached(obj): continue rel_obj_id = rel_getter(obj) try: rel_obj = rel_objs[rel_obj_id] except KeyError: pass # May happen in qs1 | qs2 scenarios. else: setattr(obj, field.name, rel_obj) yield obj class ValuesIterable(BaseIterable): """ Iterable returned by QuerySet.values() that yields a dict for each row. """ def __iter__(self): queryset = self.queryset query = queryset.query compiler = query.get_compiler(queryset.db) # extra(select=...) cols are always at the start of the row. 
names = [ *query.extra_select, *query.values_select, *query.annotation_select, ] indexes = range(len(names)) for row in compiler.results_iter(chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size): yield {names[i]: row[i] for i in indexes} class ValuesListIterable(BaseIterable): """ Iterable returned by QuerySet.values_list(flat=False) that yields a tuple for each row. """ def __iter__(self): queryset = self.queryset query = queryset.query compiler = query.get_compiler(queryset.db) if queryset._fields: # extra(select=...) cols are always at the start of the row. names = [ *query.extra_select, *query.values_select, *query.annotation_select, ] fields = [*queryset._fields, *(f for f in query.annotation_select if f not in queryset._fields)] if fields != names: # Reorder according to fields. index_map = {name: idx for idx, name in enumerate(names)} rowfactory = operator.itemgetter(*[index_map[f] for f in fields]) return map( rowfactory, compiler.results_iter(chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size) ) return compiler.results_iter(tuple_expected=True, chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size) class NamedValuesListIterable(ValuesListIterable): """ Iterable returned by QuerySet.values_list(named=True) that yields a namedtuple for each row. """ @staticmethod @lru_cache() def create_namedtuple_class(*names): # Cache namedtuple() with @lru_cache() since it's too slow to be # called for every QuerySet evaluation. return namedtuple('Row', names) def __iter__(self): queryset = self.queryset if queryset._fields: names = queryset._fields else: query = queryset.query names = [*query.extra_select, *query.values_select, *query.annotation_select] tuple_class = self.create_namedtuple_class(*names) new = tuple.__new__ for row in super().__iter__(): yield new(tuple_class, row) class FlatValuesListIterable(BaseIterable): """ Iterable returned by QuerySet.values_list(flat=True) that yields single values. """ def __iter__(self): queryset = self.queryset compiler = queryset.query.get_compiler(queryset.db) for row in compiler.results_iter(chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size): yield row[0] class QuerySet: """Represent a lazy database lookup for a set of objects.""" def __init__(self, model=None, query=None, using=None, hints=None): self.model = model self._db = using self._hints = hints or {} self._query = query or sql.Query(self.model) self._result_cache = None self._sticky_filter = False self._for_write = False self._prefetch_related_lookups = () self._prefetch_done = False self._known_related_objects = {} # {rel_field: {pk: rel_obj}} self._iterable_class = ModelIterable self._fields = None self._defer_next_filter = False self._deferred_filter = None @property def query(self): if self._deferred_filter: negate, args, kwargs = self._deferred_filter self._filter_or_exclude_inplace(negate, *args, **kwargs) self._deferred_filter = None return self._query @query.setter def query(self, value): self._query = value def as_manager(cls): # Address the circular dependency between `Queryset` and `Manager`. 
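        # Typical use (illustrative; the names are hypothetical):
        #     class AuthorQuerySet(models.QuerySet):
        #         def published(self):
        #             return self.filter(is_published=True)
        #
        #     class Author(models.Model):
        #         objects = AuthorQuerySet.as_manager()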
        from django.db.models.manager import Manager
        manager = Manager.from_queryset(cls)()
        manager._built_with_as_manager = True
        return manager
    as_manager.queryset_only = True
    as_manager = classmethod(as_manager)

    ########################
    # PYTHON MAGIC METHODS #
    ########################

    def __deepcopy__(self, memo):
        """Don't populate the QuerySet's cache."""
        obj = self.__class__()
        for k, v in self.__dict__.items():
            if k == '_result_cache':
                obj.__dict__[k] = None
            else:
                obj.__dict__[k] = copy.deepcopy(v, memo)
        return obj

    def __getstate__(self):
        # Force the cache to be fully populated.
        self._fetch_all()
        return {**self.__dict__, DJANGO_VERSION_PICKLE_KEY: get_version()}

    def __setstate__(self, state):
        msg = None
        pickled_version = state.get(DJANGO_VERSION_PICKLE_KEY)
        if pickled_version:
            current_version = get_version()
            if current_version != pickled_version:
                msg = (
                    "Pickled queryset instance's Django version %s does not "
                    "match the current version %s." % (pickled_version, current_version)
                )
        else:
            msg = "Pickled queryset instance's Django version is not specified."
        if msg:
            warnings.warn(msg, RuntimeWarning, stacklevel=2)
        self.__dict__.update(state)

    def __repr__(self):
        data = list(self[:REPR_OUTPUT_SIZE + 1])
        if len(data) > REPR_OUTPUT_SIZE:
            data[-1] = "...(remaining elements truncated)..."
        return '<%s %r>' % (self.__class__.__name__, data)

    def __len__(self):
        self._fetch_all()
        return len(self._result_cache)

    def __iter__(self):
        """
        The queryset iterator protocol uses three nested iterators in the
        default case:
            1. sql.compiler.execute_sql()
               - Returns 100 rows at a time (constants.GET_ITERATOR_CHUNK_SIZE)
                 using cursor.fetchmany(). This part is responsible for
                 doing some column masking, and returning the rows in chunks.
            2. sql.compiler.results_iter()
               - Returns one row at a time. At this point the rows are still
                 just tuples. In some cases the return values are converted to
                 Python values at this location.
            3. self.iterator()
               - Responsible for turning the rows into model objects.
        """
        self._fetch_all()
        return iter(self._result_cache)

    def __bool__(self):
        self._fetch_all()
        return bool(self._result_cache)

    def __getitem__(self, k):
        """Retrieve an item or slice from the set of results."""
        if not isinstance(k, (int, slice)):
            raise TypeError(
                'QuerySet indices must be integers or slices, not %s.'
                % type(k).__name__
            )
        assert ((not isinstance(k, slice) and (k >= 0)) or
                (isinstance(k, slice) and (k.start is None or k.start >= 0) and
                 (k.stop is None or k.stop >= 0))), \
            "Negative indexing is not supported."
if self._result_cache is not None: return self._result_cache[k] if isinstance(k, slice): qs = self._chain() if k.start is not None: start = int(k.start) else: start = None if k.stop is not None: stop = int(k.stop) else: stop = None qs.query.set_limits(start, stop) return list(qs)[::k.step] if k.step else qs qs = self._chain() qs.query.set_limits(k, k + 1) qs._fetch_all() return qs._result_cache[0] def __class_getitem__(cls, *args, **kwargs): return cls def __and__(self, other): self._merge_sanity_check(other) if isinstance(other, EmptyQuerySet): return other if isinstance(self, EmptyQuerySet): return self combined = self._chain() combined._merge_known_related_objects(other) combined.query.combine(other.query, sql.AND) return combined def __or__(self, other): self._merge_sanity_check(other) if isinstance(self, EmptyQuerySet): return other if isinstance(other, EmptyQuerySet): return self query = self if self.query.can_filter() else self.model._base_manager.filter(pk__in=self.values('pk')) combined = query._chain() combined._merge_known_related_objects(other) if not other.query.can_filter(): other = other.model._base_manager.filter(pk__in=other.values('pk')) combined.query.combine(other.query, sql.OR) return combined #################################### # METHODS THAT DO DATABASE QUERIES # #################################### def _iterator(self, use_chunked_fetch, chunk_size): yield from self._iterable_class(self, chunked_fetch=use_chunked_fetch, chunk_size=chunk_size) def iterator(self, chunk_size=2000): """ An iterator over the results from applying this QuerySet to the database. """ if chunk_size <= 0: raise ValueError('Chunk size must be strictly positive.') use_chunked_fetch = not connections[self.db].settings_dict.get('DISABLE_SERVER_SIDE_CURSORS') return self._iterator(use_chunked_fetch, chunk_size) def aggregate(self, *args, **kwargs): """ Return a dictionary containing the calculations (aggregation) over the current queryset. If args is present the expression is passed as a kwarg using the Aggregate object's default alias. """ if self.query.distinct_fields: raise NotImplementedError("aggregate() + distinct(fields) not implemented.") self._validate_values_are_expressions((*args, *kwargs.values()), method_name='aggregate') for arg in args: # The default_alias property raises TypeError if default_alias # can't be set automatically or AttributeError if it isn't an # attribute. try: arg.default_alias except (AttributeError, TypeError): raise TypeError("Complex aggregates require an alias") kwargs[arg.default_alias] = arg query = self.query.chain() for (alias, aggregate_expr) in kwargs.items(): query.add_annotation(aggregate_expr, alias, is_summary=True) if not query.annotations[alias].contains_aggregate: raise TypeError("%s is not an aggregate expression" % alias) return query.get_aggregation(self.db, kwargs) def count(self): """ Perform a SELECT COUNT() and return the number of records as an integer. If the QuerySet is already fully cached, return the length of the cached results set to avoid multiple SELECT COUNT(*) calls. """ if self._result_cache is not None: return len(self._result_cache) return self.query.get_count(using=self.db) def get(self, *args, **kwargs): """ Perform the query and return a single object matching the given keyword arguments. 
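        Illustrative example (``Entry`` is a hypothetical model)::

            entry = Entry.objects.get(pk=1)

        Raise ``Entry.DoesNotExist`` if no row matches, and
        ``Entry.MultipleObjectsReturned`` if more than one does.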
""" clone = self._chain() if self.query.combinator else self.filter(*args, **kwargs) if self.query.can_filter() and not self.query.distinct_fields: clone = clone.order_by() limit = None if not clone.query.select_for_update or connections[clone.db].features.supports_select_for_update_with_limit: limit = MAX_GET_RESULTS clone.query.set_limits(high=limit) num = len(clone) if num == 1: return clone._result_cache[0] if not num: raise self.model.DoesNotExist( "%s matching query does not exist." % self.model._meta.object_name ) raise self.model.MultipleObjectsReturned( 'get() returned more than one %s -- it returned %s!' % ( self.model._meta.object_name, num if not limit or num < limit else 'more than %s' % (limit - 1), ) ) def create(self, **kwargs): """ Create a new object with the given kwargs, saving it to the database and returning the created object. """ obj = self.model(**kwargs) self._for_write = True obj.save(force_insert=True, using=self.db) return obj def _populate_pk_values(self, objs): for obj in objs: if obj.pk is None: obj.pk = obj._meta.pk.get_pk_value_on_save(obj) def bulk_create(self, objs, batch_size=None, ignore_conflicts=False): """ Insert each of the instances into the database. Do *not* call save() on each of the instances, do not send any pre/post_save signals, and do not set the primary key attribute if it is an autoincrement field (except if features.can_return_rows_from_bulk_insert=True). Multi-table models are not supported. """ # When you bulk insert you don't get the primary keys back (if it's an # autoincrement, except if can_return_rows_from_bulk_insert=True), so # you can't insert into the child tables which references this. There # are two workarounds: # 1) This could be implemented if you didn't have an autoincrement pk # 2) You could do it by doing O(n) normal inserts into the parent # tables to get the primary keys back and then doing a single bulk # insert into the childmost table. # We currently set the primary keys on the objects when using # PostgreSQL via the RETURNING ID clause. It should be possible for # Oracle as well, but the semantics for extracting the primary keys is # trickier so it's not done yet. assert batch_size is None or batch_size > 0 # Check that the parents share the same concrete model with the our # model to detect the inheritance pattern ConcreteGrandParent -> # MultiTableParent -> ProxyChild. Simply checking self.model._meta.proxy # would not identify that case as involving multiple tables. 
for parent in self.model._meta.get_parent_list(): if parent._meta.concrete_model is not self.model._meta.concrete_model: raise ValueError("Can't bulk create a multi-table inherited model") if not objs: return objs self._for_write = True connection = connections[self.db] opts = self.model._meta fields = opts.concrete_fields objs = list(objs) self._populate_pk_values(objs) with transaction.atomic(using=self.db, savepoint=False): objs_with_pk, objs_without_pk = partition(lambda o: o.pk is None, objs) if objs_with_pk: returned_columns = self._batched_insert( objs_with_pk, fields, batch_size, ignore_conflicts=ignore_conflicts, ) for obj_with_pk, results in zip(objs_with_pk, returned_columns): for result, field in zip(results, opts.db_returning_fields): if field != opts.pk: setattr(obj_with_pk, field.attname, result) for obj_with_pk in objs_with_pk: obj_with_pk._state.adding = False obj_with_pk._state.db = self.db if objs_without_pk: fields = [f for f in fields if not isinstance(f, AutoField)] returned_columns = self._batched_insert( objs_without_pk, fields, batch_size, ignore_conflicts=ignore_conflicts, ) if connection.features.can_return_rows_from_bulk_insert and not ignore_conflicts: assert len(returned_columns) == len(objs_without_pk) for obj_without_pk, results in zip(objs_without_pk, returned_columns): for result, field in zip(results, opts.db_returning_fields): setattr(obj_without_pk, field.attname, result) obj_without_pk._state.adding = False obj_without_pk._state.db = self.db return objs def bulk_update(self, objs, fields, batch_size=None): """ Update the given fields in each of the given objects in the database. """ if batch_size is not None and batch_size < 0: raise ValueError('Batch size must be a positive integer.') if not fields: raise ValueError('Field names must be given to bulk_update().') objs = tuple(objs) if any(obj.pk is None for obj in objs): raise ValueError('All bulk_update() objects must have a primary key set.') fields = [self.model._meta.get_field(name) for name in fields] if any(not f.concrete or f.many_to_many for f in fields): raise ValueError('bulk_update() can only be used with concrete fields.') if any(f.primary_key for f in fields): raise ValueError('bulk_update() cannot be used with primary key fields.') if not objs: return # PK is used twice in the resulting update query, once in the filter # and once in the WHEN. Each field will also have one CAST. 
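        # Per batch, the generated SQL is roughly of the shape (illustrative,
        # backend-dependent):
        #     UPDATE app_model SET field = CASE
        #         WHEN pk = 1 THEN <value for object 1>
        #         WHEN pk = 2 THEN <value for object 2>
        #     END
        #     WHERE pk IN (1, 2)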
max_batch_size = connections[self.db].ops.bulk_batch_size(['pk', 'pk'] + fields, objs) batch_size = min(batch_size, max_batch_size) if batch_size else max_batch_size requires_casting = connections[self.db].features.requires_casted_case_in_updates batches = (objs[i:i + batch_size] for i in range(0, len(objs), batch_size)) updates = [] for batch_objs in batches: update_kwargs = {} for field in fields: when_statements = [] for obj in batch_objs: attr = getattr(obj, field.attname) if not isinstance(attr, Expression): attr = Value(attr, output_field=field) when_statements.append(When(pk=obj.pk, then=attr)) case_statement = Case(*when_statements, output_field=field) if requires_casting: case_statement = Cast(case_statement, output_field=field) update_kwargs[field.attname] = case_statement updates.append(([obj.pk for obj in batch_objs], update_kwargs)) with transaction.atomic(using=self.db, savepoint=False): for pks, update_kwargs in updates: self.filter(pk__in=pks).update(**update_kwargs) bulk_update.alters_data = True def get_or_create(self, defaults=None, **kwargs): """ Look up an object with the given kwargs, creating one if necessary. Return a tuple of (object, created), where created is a boolean specifying whether an object was created. """ # The get() needs to be targeted at the write database in order # to avoid potential transaction consistency problems. self._for_write = True try: return self.get(**kwargs), False except self.model.DoesNotExist: params = self._extract_model_params(defaults, **kwargs) return self._create_object_from_params(kwargs, params) def update_or_create(self, defaults=None, **kwargs): """ Look up an object with the given kwargs, updating one with defaults if it exists, otherwise create a new one. Return a tuple (object, created), where created is a boolean specifying whether an object was created. """ defaults = defaults or {} self._for_write = True with transaction.atomic(using=self.db): try: obj = self.select_for_update().get(**kwargs) except self.model.DoesNotExist: params = self._extract_model_params(defaults, **kwargs) # Lock the row so that a concurrent update is blocked until # after update_or_create() has performed its save. obj, created = self._create_object_from_params(kwargs, params, lock=True) if created: return obj, created for k, v in resolve_callables(defaults): setattr(obj, k, v) obj.save(using=self.db) return obj, False def _create_object_from_params(self, lookup, params, lock=False): """ Try to create an object using passed params. Used by get_or_create() and update_or_create(). """ try: with transaction.atomic(using=self.db): params = dict(resolve_callables(params)) obj = self.create(**params) return obj, True except IntegrityError: try: qs = self.select_for_update() if lock else self return qs.get(**lookup), False except self.model.DoesNotExist: pass raise def _extract_model_params(self, defaults, **kwargs): """ Prepare `params` for creating a model instance based on the given kwargs; for use by get_or_create() and update_or_create(). """ defaults = defaults or {} params = {k: v for k, v in kwargs.items() if LOOKUP_SEP not in k} params.update(defaults) property_names = self.model._meta._property_names invalid_params = [] for param in params: try: self.model._meta.get_field(param) except exceptions.FieldDoesNotExist: # It's okay to use a model's property if it has a setter. 
if not (param in property_names and getattr(self.model, param).fset): invalid_params.append(param) if invalid_params: raise exceptions.FieldError( "Invalid field name(s) for model %s: '%s'." % ( self.model._meta.object_name, "', '".join(sorted(invalid_params)), )) return params def _earliest(self, *fields): """ Return the earliest object according to fields (if given) or by the model's Meta.get_latest_by. """ if fields: order_by = fields else: order_by = getattr(self.model._meta, 'get_latest_by') if order_by and not isinstance(order_by, (tuple, list)): order_by = (order_by,) if order_by is None: raise ValueError( "earliest() and latest() require either fields as positional " "arguments or 'get_latest_by' in the model's Meta." ) assert not self.query.is_sliced, \ "Cannot change a query once a slice has been taken." obj = self._chain() obj.query.set_limits(high=1) obj.query.clear_ordering(force_empty=True) obj.query.add_ordering(*order_by) return obj.get() def earliest(self, *fields): return self._earliest(*fields) def latest(self, *fields): return self.reverse()._earliest(*fields) def first(self): """Return the first object of a query or None if no match is found.""" for obj in (self if self.ordered else self.order_by('pk'))[:1]: return obj def last(self): """Return the last object of a query or None if no match is found.""" for obj in (self.reverse() if self.ordered else self.order_by('-pk'))[:1]: return obj def in_bulk(self, id_list=None, *, field_name='pk'): """ Return a dictionary mapping each of the given IDs to the object with that ID. If `id_list` isn't provided, evaluate the entire QuerySet. """ assert not self.query.is_sliced, \ "Cannot use 'limit' or 'offset' with in_bulk" opts = self.model._meta unique_fields = [ constraint.fields[0] for constraint in opts.total_unique_constraints if len(constraint.fields) == 1 ] if ( field_name != 'pk' and not opts.get_field(field_name).unique and field_name not in unique_fields ): raise ValueError("in_bulk()'s field_name must be a unique field but %r isn't." % field_name) if id_list is not None: if not id_list: return {} filter_key = '{}__in'.format(field_name) batch_size = connections[self.db].features.max_query_params id_list = tuple(id_list) # If the database has a limit on the number of query parameters # (e.g. SQLite), retrieve objects in batches if necessary. if batch_size and batch_size < len(id_list): qs = () for offset in range(0, len(id_list), batch_size): batch = id_list[offset:offset + batch_size] qs += tuple(self.filter(**{filter_key: batch}).order_by()) else: qs = self.filter(**{filter_key: id_list}).order_by() else: qs = self._chain() return {getattr(obj, field_name): obj for obj in qs} def delete(self): """Delete the records in the current QuerySet.""" self._not_support_combined_queries('delete') assert not self.query.is_sliced, \ "Cannot use 'limit' or 'offset' with delete." if self._fields is not None: raise TypeError("Cannot call delete() after .values() or .values_list()") del_query = self._chain() # The delete is actually 2 queries - one to find related objects, # and one to delete. Make sure that the discovery of related # objects is performed on the same database as the deletion. del_query._for_write = True # Disable non-supported fields. 
        del_query.query.select_for_update = False
        del_query.query.select_related = False
        del_query.query.clear_ordering(force_empty=True)

        collector = Collector(using=del_query.db)
        collector.collect(del_query)
        deleted, _rows_count = collector.delete()

        # Clear the result cache, in case this QuerySet gets reused.
        self._result_cache = None
        return deleted, _rows_count

    delete.alters_data = True
    delete.queryset_only = True

    def _raw_delete(self, using):
        """
        Delete objects found from the given queryset in a single direct SQL
        query. No signals are sent and there is no protection for cascades.
        """
        query = self.query.clone()
        query.__class__ = sql.DeleteQuery
        cursor = query.get_compiler(using).execute_sql(CURSOR)
        if cursor:
            with cursor:
                return cursor.rowcount
        return 0
    _raw_delete.alters_data = True

    def update(self, **kwargs):
        """
        Update all elements in the current QuerySet, setting all the given
        fields to the appropriate values.
        """
        self._not_support_combined_queries('update')
        assert not self.query.is_sliced, \
            "Cannot update a query once a slice has been taken."
        self._for_write = True
        query = self.query.chain(sql.UpdateQuery)
        query.add_update_values(kwargs)
        # Clear any annotations so that they won't be present in subqueries.
        query.annotations = {}
        with transaction.mark_for_rollback_on_error(using=self.db):
            rows = query.get_compiler(self.db).execute_sql(CURSOR)
        self._result_cache = None
        return rows
    update.alters_data = True

    def _update(self, values):
        """
        A version of update() that accepts field objects instead of field names.
        Used primarily for model saving and not intended for use by general
        code (it requires too much poking around at model internals to be
        useful at that level).
        """
        assert not self.query.is_sliced, \
            "Cannot update a query once a slice has been taken."
        query = self.query.chain(sql.UpdateQuery)
        query.add_update_fields(values)
        # Clear any annotations so that they won't be present in subqueries.
        query.annotations = {}
        self._result_cache = None
        return query.get_compiler(self.db).execute_sql(CURSOR)
    _update.alters_data = True
    _update.queryset_only = False

    def exists(self):
        if self._result_cache is None:
            return self.query.has_results(using=self.db)
        return bool(self._result_cache)

    def _prefetch_related_objects(self):
        # This method can only be called once the result cache has been filled.
prefetch_related_objects(self._result_cache, *self._prefetch_related_lookups) self._prefetch_done = True def explain(self, *, format=None, **options): return self.query.explain(using=self.db, format=format, **options) ################################################## # PUBLIC METHODS THAT RETURN A QUERYSET SUBCLASS # ################################################## def raw(self, raw_query, params=None, translations=None, using=None): if using is None: using = self.db qs = RawQuerySet(raw_query, model=self.model, params=params, translations=translations, using=using) qs._prefetch_related_lookups = self._prefetch_related_lookups[:] return qs def _values(self, *fields, **expressions): clone = self._chain() if expressions: clone = clone.annotate(**expressions) clone._fields = fields clone.query.set_values(fields) return clone def values(self, *fields, **expressions): fields += tuple(expressions) clone = self._values(*fields, **expressions) clone._iterable_class = ValuesIterable return clone def values_list(self, *fields, flat=False, named=False): if flat and named: raise TypeError("'flat' and 'named' can't be used together.") if flat and len(fields) > 1: raise TypeError("'flat' is not valid when values_list is called with more than one field.") field_names = {f for f in fields if not hasattr(f, 'resolve_expression')} _fields = [] expressions = {} counter = 1 for field in fields: if hasattr(field, 'resolve_expression'): field_id_prefix = getattr(field, 'default_alias', field.__class__.__name__.lower()) while True: field_id = field_id_prefix + str(counter) counter += 1 if field_id not in field_names: break expressions[field_id] = field _fields.append(field_id) else: _fields.append(field) clone = self._values(*_fields, **expressions) clone._iterable_class = ( NamedValuesListIterable if named else FlatValuesListIterable if flat else ValuesListIterable ) return clone def dates(self, field_name, kind, order='ASC'): """ Return a list of date objects representing all available dates for the given field_name, scoped to 'kind'. """ assert kind in ('year', 'month', 'week', 'day'), \ "'kind' must be one of 'year', 'month', 'week', or 'day'." assert order in ('ASC', 'DESC'), \ "'order' must be either 'ASC' or 'DESC'." return self.annotate( datefield=Trunc(field_name, kind, output_field=DateField()), plain_field=F(field_name) ).values_list( 'datefield', flat=True ).distinct().filter(plain_field__isnull=False).order_by(('-' if order == 'DESC' else '') + 'datefield') def datetimes(self, field_name, kind, order='ASC', tzinfo=None, is_dst=None): """ Return a list of datetime objects representing all available datetimes for the given field_name, scoped to 'kind'. """ assert kind in ('year', 'month', 'week', 'day', 'hour', 'minute', 'second'), \ "'kind' must be one of 'year', 'month', 'week', 'day', 'hour', 'minute', or 'second'." assert order in ('ASC', 'DESC'), \ "'order' must be either 'ASC' or 'DESC'." 
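        # Illustrative (hypothetical model): Entry.objects.datetimes('pub_date', 'month')
        # returns one datetime per distinct month. With USE_TZ enabled, the
        # truncation below happens in the current (or given) timezone.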
if settings.USE_TZ: if tzinfo is None: tzinfo = timezone.get_current_timezone() else: tzinfo = None return self.annotate( datetimefield=Trunc( field_name, kind, output_field=DateTimeField(), tzinfo=tzinfo, is_dst=is_dst, ), plain_field=F(field_name) ).values_list( 'datetimefield', flat=True ).distinct().filter(plain_field__isnull=False).order_by(('-' if order == 'DESC' else '') + 'datetimefield') def none(self): """Return an empty QuerySet.""" clone = self._chain() clone.query.set_empty() return clone ################################################################## # PUBLIC METHODS THAT ALTER ATTRIBUTES AND RETURN A NEW QUERYSET # ################################################################## def all(self): """ Return a new QuerySet that is a copy of the current one. This allows a QuerySet to proxy for a model manager in some cases. """ return self._chain() def filter(self, *args, **kwargs): """ Return a new QuerySet instance with the args ANDed to the existing set. """ self._not_support_combined_queries('filter') return self._filter_or_exclude(False, *args, **kwargs) def exclude(self, *args, **kwargs): """ Return a new QuerySet instance with NOT (args) ANDed to the existing set. """ self._not_support_combined_queries('exclude') return self._filter_or_exclude(True, *args, **kwargs) def _filter_or_exclude(self, negate, *args, **kwargs): if args or kwargs: assert not self.query.is_sliced, \ "Cannot filter a query once a slice has been taken." clone = self._chain() if self._defer_next_filter: self._defer_next_filter = False clone._deferred_filter = negate, args, kwargs else: clone._filter_or_exclude_inplace(negate, *args, **kwargs) return clone def _filter_or_exclude_inplace(self, negate, *args, **kwargs): if negate: self._query.add_q(~Q(*args, **kwargs)) else: self._query.add_q(Q(*args, **kwargs)) def complex_filter(self, filter_obj): """ Return a new QuerySet instance with filter_obj added to the filters. filter_obj can be a Q object or a dictionary of keyword lookup arguments. This exists to support framework features such as 'limit_choices_to', and usually it will be more natural to use other methods. """ if isinstance(filter_obj, Q): clone = self._chain() clone.query.add_q(filter_obj) return clone else: return self._filter_or_exclude(False, **filter_obj) def _combinator_query(self, combinator, *other_qs, all=False): # Clone the query to inherit the select list and everything clone = self._chain() # Clear limits and ordering so they can be reapplied clone.query.clear_ordering(True) clone.query.clear_limits() clone.query.combined_queries = (self.query,) + tuple(qs.query for qs in other_qs) clone.query.combinator = combinator clone.query.combinator_all = all return clone def union(self, *other_qs, all=False): # If the query is an EmptyQuerySet, combine all nonempty querysets. if isinstance(self, EmptyQuerySet): qs = [q for q in other_qs if not isinstance(q, EmptyQuerySet)] return qs[0]._combinator_query('union', *qs[1:], all=all) if qs else self return self._combinator_query('union', *other_qs, all=all) def intersection(self, *other_qs): # If any query is an EmptyQuerySet, return it. if isinstance(self, EmptyQuerySet): return self for other in other_qs: if isinstance(other, EmptyQuerySet): return other return self._combinator_query('intersection', *other_qs) def difference(self, *other_qs): # If the query is an EmptyQuerySet, return it. 
if isinstance(self, EmptyQuerySet): return self return self._combinator_query('difference', *other_qs) def select_for_update(self, nowait=False, skip_locked=False, of=()): """ Return a new QuerySet instance that will select objects with a FOR UPDATE lock. """ if nowait and skip_locked: raise ValueError('The nowait option cannot be used with skip_locked.') obj = self._chain() obj._for_write = True obj.query.select_for_update = True obj.query.select_for_update_nowait = nowait obj.query.select_for_update_skip_locked = skip_locked obj.query.select_for_update_of = of return obj def select_related(self, *fields): """ Return a new QuerySet instance that will select related objects. If fields are specified, they must be ForeignKey fields and only those related objects are included in the selection. If select_related(None) is called, clear the list. """ self._not_support_combined_queries('select_related') if self._fields is not None: raise TypeError("Cannot call select_related() after .values() or .values_list()") obj = self._chain() if fields == (None,): obj.query.select_related = False elif fields: obj.query.add_select_related(fields) else: obj.query.select_related = True return obj def prefetch_related(self, *lookups): """ Return a new QuerySet instance that will prefetch the specified Many-To-One and Many-To-Many related objects when the QuerySet is evaluated. When prefetch_related() is called more than once, append to the list of prefetch lookups. If prefetch_related(None) is called, clear the list. """ self._not_support_combined_queries('prefetch_related') clone = self._chain() if lookups == (None,): clone._prefetch_related_lookups = () else: for lookup in lookups: if isinstance(lookup, Prefetch): lookup = lookup.prefetch_to lookup = lookup.split(LOOKUP_SEP, 1)[0] if lookup in self.query._filtered_relations: raise ValueError('prefetch_related() is not supported with FilteredRelation.') clone._prefetch_related_lookups = clone._prefetch_related_lookups + lookups return clone def annotate(self, *args, **kwargs): """ Return a query set in which the returned objects have been annotated with extra data or aggregations. """ self._not_support_combined_queries('annotate') self._validate_values_are_expressions(args + tuple(kwargs.values()), method_name='annotate') annotations = {} for arg in args: # The default_alias property may raise a TypeError. try: if arg.default_alias in kwargs: raise ValueError("The named annotation '%s' conflicts with the " "default name for another annotation." % arg.default_alias) except TypeError: raise TypeError("Complex annotations require an alias") annotations[arg.default_alias] = arg annotations.update(kwargs) clone = self._chain() names = self._fields if names is None: names = set(chain.from_iterable( (field.name, field.attname) if hasattr(field, 'attname') else (field.name,) for field in self.model._meta.get_fields() )) for alias, annotation in annotations.items(): if alias in names: raise ValueError("The annotation '%s' conflicts with a field on " "the model." 
                                 % alias)
            if isinstance(annotation, FilteredRelation):
                clone.query.add_filtered_relation(annotation, alias)
            else:
                clone.query.add_annotation(annotation, alias, is_summary=False)

        for alias, annotation in clone.query.annotations.items():
            if alias in annotations and annotation.contains_aggregate:
                if clone._fields is None:
                    clone.query.group_by = True
                else:
                    clone.query.set_group_by()
                break

        return clone

    def order_by(self, *field_names):
        """Return a new QuerySet instance with the ordering changed."""
        assert not self.query.is_sliced, \
            "Cannot reorder a query once a slice has been taken."
        obj = self._chain()
        obj.query.clear_ordering(force_empty=False)
        obj.query.add_ordering(*field_names)
        return obj

    def distinct(self, *field_names):
        """
        Return a new QuerySet instance that will select only distinct results.
        """
        assert not self.query.is_sliced, \
            "Cannot create distinct fields once a slice has been taken."
        obj = self._chain()
        obj.query.add_distinct_fields(*field_names)
        return obj

    def extra(self, select=None, where=None, params=None, tables=None,
              order_by=None, select_params=None):
        """Add extra SQL fragments to the query."""
        self._not_support_combined_queries('extra')
        assert not self.query.is_sliced, \
            "Cannot change a query once a slice has been taken"
        clone = self._chain()
        clone.query.add_extra(select, select_params, where, params, tables, order_by)
        return clone

    def reverse(self):
        """Reverse the ordering of the QuerySet."""
        if self.query.is_sliced:
            raise TypeError('Cannot reverse a query once a slice has been taken.')
        clone = self._chain()
        clone.query.standard_ordering = not clone.query.standard_ordering
        return clone

    def defer(self, *fields):
        """
        Defer the loading of data for certain fields until they are accessed.
        Add the set of deferred fields to any existing set of deferred fields.
        The only exception to this is if None is passed in as the only
        parameter, in which case remove all deferrals.
        """
        self._not_support_combined_queries('defer')
        if self._fields is not None:
            raise TypeError("Cannot call defer() after .values() or .values_list()")
        clone = self._chain()
        if fields == (None,):
            clone.query.clear_deferred_loading()
        else:
            clone.query.add_deferred_loading(fields)
        return clone

    def only(self, *fields):
        """
        Essentially, the opposite of defer(). Only the fields passed into this
        method and that are not already specified as deferred are loaded
        immediately when the queryset is evaluated.
        """
        self._not_support_combined_queries('only')
        if self._fields is not None:
            raise TypeError("Cannot call only() after .values() or .values_list()")
        if fields == (None,):
            # Can only pass None to defer(), not only(), as the rest option.
            # That won't stop people trying to do this, so let's be explicit.
            raise TypeError("Cannot pass None as an argument to only().")
        for field in fields:
            field = field.split(LOOKUP_SEP, 1)[0]
            if field in self.query._filtered_relations:
                raise ValueError('only() is not supported with FilteredRelation.')
        clone = self._chain()
        clone.query.add_immediate_loading(fields)
        return clone

    def using(self, alias):
        """Select which database this QuerySet should execute against."""
        clone = self._chain()
        clone._db = alias
        return clone

    ###################################
    # PUBLIC INTROSPECTION ATTRIBUTES #
    ###################################

    @property
    def ordered(self):
        """
        Return True if the QuerySet is ordered -- i.e. has an order_by()
        clause or a default ordering on the model (or is empty).
""" if isinstance(self, EmptyQuerySet): return True if self.query.extra_order_by or self.query.order_by: return True elif self.query.default_ordering and self.query.get_meta().ordering: return True else: return False @property def db(self): """Return the database used if this query is executed now.""" if self._for_write: return self._db or router.db_for_write(self.model, **self._hints) return self._db or router.db_for_read(self.model, **self._hints) ################### # PRIVATE METHODS # ################### def _insert(self, objs, fields, returning_fields=None, raw=False, using=None, ignore_conflicts=False): """ Insert a new record for the given model. This provides an interface to the InsertQuery class and is how Model.save() is implemented. """ self._for_write = True if using is None: using = self.db query = sql.InsertQuery(self.model, ignore_conflicts=ignore_conflicts) query.insert_values(fields, objs, raw=raw) return query.get_compiler(using=using).execute_sql(returning_fields) _insert.alters_data = True _insert.queryset_only = False def _batched_insert(self, objs, fields, batch_size, ignore_conflicts=False): """ Helper method for bulk_create() to insert objs one batch at a time. """ if ignore_conflicts and not connections[self.db].features.supports_ignore_conflicts: raise NotSupportedError('This database backend does not support ignoring conflicts.') ops = connections[self.db].ops max_batch_size = max(ops.bulk_batch_size(fields, objs), 1) batch_size = min(batch_size, max_batch_size) if batch_size else max_batch_size inserted_rows = [] bulk_return = connections[self.db].features.can_return_rows_from_bulk_insert for item in [objs[i:i + batch_size] for i in range(0, len(objs), batch_size)]: if bulk_return and not ignore_conflicts: inserted_rows.extend(self._insert( item, fields=fields, using=self.db, returning_fields=self.model._meta.db_returning_fields, ignore_conflicts=ignore_conflicts, )) else: self._insert(item, fields=fields, using=self.db, ignore_conflicts=ignore_conflicts) return inserted_rows def _chain(self, **kwargs): """ Return a copy of the current QuerySet that's ready for another operation. """ obj = self._clone() if obj._sticky_filter: obj.query.filter_is_sticky = True obj._sticky_filter = False obj.__dict__.update(kwargs) return obj def _clone(self): """ Return a copy of the current QuerySet. A lightweight alternative to deepcopy(). """ c = self.__class__(model=self.model, query=self.query.chain(), using=self._db, hints=self._hints) c._sticky_filter = self._sticky_filter c._for_write = self._for_write c._prefetch_related_lookups = self._prefetch_related_lookups[:] c._known_related_objects = self._known_related_objects c._iterable_class = self._iterable_class c._fields = self._fields return c def _fetch_all(self): if self._result_cache is None: self._result_cache = list(self._iterable_class(self)) if self._prefetch_related_lookups and not self._prefetch_done: self._prefetch_related_objects() def _next_is_sticky(self): """ Indicate that the next filter call and the one following that should be treated as a single filter. This is only important when it comes to determining when to reuse tables for many-to-many filters. Required so that we can filter naturally on the results of related managers. This doesn't return a clone of the current QuerySet (it returns "self"). The method is only used internally and should be immediately followed by a filter() that does create a clone. 
""" self._sticky_filter = True return self def _merge_sanity_check(self, other): """Check that two QuerySet classes may be merged.""" if self._fields is not None and ( set(self.query.values_select) != set(other.query.values_select) or set(self.query.extra_select) != set(other.query.extra_select) or set(self.query.annotation_select) != set(other.query.annotation_select)): raise TypeError( "Merging '%s' classes must involve the same values in each case." % self.__class__.__name__ ) def _merge_known_related_objects(self, other): """ Keep track of all known related objects from either QuerySet instance. """ for field, objects in other._known_related_objects.items(): self._known_related_objects.setdefault(field, {}).update(objects) def resolve_expression(self, *args, **kwargs): if self._fields and len(self._fields) > 1: # values() queryset can only be used as nested queries # if they are set up to select only a single field. raise TypeError('Cannot use multi-field values as a filter value.') query = self.query.resolve_expression(*args, **kwargs) query._db = self._db return query resolve_expression.queryset_only = True def _add_hints(self, **hints): """ Update hinting information for use by routers. Add new key/values or overwrite existing key/values. """ self._hints.update(hints) def _has_filters(self): """ Check if this QuerySet has any filtering going on. This isn't equivalent with checking if all objects are present in results, for example, qs[1:]._has_filters() -> False. """ return self.query.has_filters() @staticmethod def _validate_values_are_expressions(values, method_name): invalid_args = sorted(str(arg) for arg in values if not hasattr(arg, 'resolve_expression')) if invalid_args: raise TypeError( 'QuerySet.%s() received non-expression(s): %s.' % ( method_name, ', '.join(invalid_args), ) ) def _not_support_combined_queries(self, operation_name): if self.query.combinator: raise NotSupportedError( 'Calling QuerySet.%s() after %s() is not supported.' % (operation_name, self.query.combinator) ) class InstanceCheckMeta(type): def __instancecheck__(self, instance): return isinstance(instance, QuerySet) and instance.query.is_empty() class EmptyQuerySet(metaclass=InstanceCheckMeta): """ Marker class to checking if a queryset is empty by .none(): isinstance(qs.none(), EmptyQuerySet) -> True """ def __init__(self, *args, **kwargs): raise TypeError("EmptyQuerySet can't be instantiated") class RawQuerySet: """ Provide an iterator which converts the results of raw SQL queries into annotated model instances. 
""" def __init__(self, raw_query, model=None, query=None, params=None, translations=None, using=None, hints=None): self.raw_query = raw_query self.model = model self._db = using self._hints = hints or {} self.query = query or sql.RawQuery(sql=raw_query, using=self.db, params=params) self.params = params or () self.translations = translations or {} self._result_cache = None self._prefetch_related_lookups = () self._prefetch_done = False def resolve_model_init_order(self): """Resolve the init field names and value positions.""" converter = connections[self.db].introspection.identifier_converter model_init_fields = [f for f in self.model._meta.fields if converter(f.column) in self.columns] annotation_fields = [(column, pos) for pos, column in enumerate(self.columns) if column not in self.model_fields] model_init_order = [self.columns.index(converter(f.column)) for f in model_init_fields] model_init_names = [f.attname for f in model_init_fields] return model_init_names, model_init_order, annotation_fields def prefetch_related(self, *lookups): """Same as QuerySet.prefetch_related()""" clone = self._clone() if lookups == (None,): clone._prefetch_related_lookups = () else: clone._prefetch_related_lookups = clone._prefetch_related_lookups + lookups return clone def _prefetch_related_objects(self): prefetch_related_objects(self._result_cache, *self._prefetch_related_lookups) self._prefetch_done = True def _clone(self): """Same as QuerySet._clone()""" c = self.__class__( self.raw_query, model=self.model, query=self.query, params=self.params, translations=self.translations, using=self._db, hints=self._hints ) c._prefetch_related_lookups = self._prefetch_related_lookups[:] return c def _fetch_all(self): if self._result_cache is None: self._result_cache = list(self.iterator()) if self._prefetch_related_lookups and not self._prefetch_done: self._prefetch_related_objects() def __len__(self): self._fetch_all() return len(self._result_cache) def __bool__(self): self._fetch_all() return bool(self._result_cache) def __iter__(self): self._fetch_all() return iter(self._result_cache) def iterator(self): # Cache some things for performance reasons outside the loop. db = self.db compiler = connections[db].ops.compiler('SQLCompiler')( self.query, connections[db], db ) query = iter(self.query) try: model_init_names, model_init_pos, annotation_fields = self.resolve_model_init_order() if self.model._meta.pk.attname not in model_init_names: raise exceptions.FieldDoesNotExist( 'Raw query must include the primary key' ) model_cls = self.model fields = [self.model_fields.get(c) for c in self.columns] converters = compiler.get_converters([ f.get_col(f.model._meta.db_table) if f else None for f in fields ]) if converters: query = compiler.apply_converters(query, converters) for values in query: # Associate fields to values model_init_values = [values[pos] for pos in model_init_pos] instance = model_cls.from_db(db, model_init_names, model_init_values) if annotation_fields: for column, pos in annotation_fields: setattr(instance, column, values[pos]) yield instance finally: # Done iterating the Query. If it has its own cursor, close it. 
if hasattr(self.query, 'cursor') and self.query.cursor: self.query.cursor.close() def __repr__(self): return "<%s: %s>" % (self.__class__.__name__, self.query) def __getitem__(self, k): return list(self)[k] @property def db(self): """Return the database used if this query is executed now.""" return self._db or router.db_for_read(self.model, **self._hints) def using(self, alias): """Select the database this RawQuerySet should execute against.""" return RawQuerySet( self.raw_query, model=self.model, query=self.query.chain(using=alias), params=self.params, translations=self.translations, using=alias, ) @cached_property def columns(self): """ A list of model field names in the order they'll appear in the query results. """ columns = self.query.get_columns() # Adjust any column names which don't match field names for (query_name, model_name) in self.translations.items(): # Ignore translations for nonexistent column names try: index = columns.index(query_name) except ValueError: pass else: columns[index] = model_name return columns @cached_property def model_fields(self): """A dict mapping column names to model field names.""" converter = connections[self.db].introspection.identifier_converter model_fields = {} for field in self.model._meta.fields: name, column = field.get_attname_column() model_fields[converter(column)] = field return model_fields class Prefetch: def __init__(self, lookup, queryset=None, to_attr=None): # `prefetch_through` is the path we traverse to perform the prefetch. self.prefetch_through = lookup # `prefetch_to` is the path to the attribute that stores the result. self.prefetch_to = lookup if queryset is not None and ( isinstance(queryset, RawQuerySet) or ( hasattr(queryset, '_iterable_class') and not issubclass(queryset._iterable_class, ModelIterable) ) ): raise ValueError( 'Prefetch querysets cannot use raw(), values(), and ' 'values_list().' ) if to_attr: self.prefetch_to = LOOKUP_SEP.join(lookup.split(LOOKUP_SEP)[:-1] + [to_attr]) self.queryset = queryset self.to_attr = to_attr def __getstate__(self): obj_dict = self.__dict__.copy() if self.queryset is not None: # Prevent the QuerySet from being evaluated obj_dict['queryset'] = self.queryset._chain( _result_cache=[], _prefetch_done=True, ) return obj_dict def add_prefix(self, prefix): self.prefetch_through = prefix + LOOKUP_SEP + self.prefetch_through self.prefetch_to = prefix + LOOKUP_SEP + self.prefetch_to def get_current_prefetch_to(self, level): return LOOKUP_SEP.join(self.prefetch_to.split(LOOKUP_SEP)[:level + 1]) def get_current_to_attr(self, level): parts = self.prefetch_to.split(LOOKUP_SEP) to_attr = parts[level] as_attr = self.to_attr and level == len(parts) - 1 return to_attr, as_attr def get_current_queryset(self, level): if self.get_current_prefetch_to(level) == self.prefetch_to: return self.queryset return None def __eq__(self, other): if not isinstance(other, Prefetch): return NotImplemented return self.prefetch_to == other.prefetch_to def __hash__(self): return hash((self.__class__, self.prefetch_to)) def normalize_prefetch_lookups(lookups, prefix=None): """Normalize lookups into Prefetch objects.""" ret = [] for lookup in lookups: if not isinstance(lookup, Prefetch): lookup = Prefetch(lookup) if prefix: lookup.add_prefix(prefix) ret.append(lookup) return ret def prefetch_related_objects(model_instances, *related_lookups): """ Populate prefetched object caches for a list of model instances based on the lookups/Prefetch instances given. 
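    Illustrative use (``Author``/``books`` are hypothetical)::

        authors = list(Author.objects.all())
        prefetch_related_objects(authors, 'books')
        # authors[0].books.all() is now served from the prefetch cache.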
""" if not model_instances: return # nothing to do # We need to be able to dynamically add to the list of prefetch_related # lookups that we look up (see below). So we need some book keeping to # ensure we don't do duplicate work. done_queries = {} # dictionary of things like 'foo__bar': [results] auto_lookups = set() # we add to this as we go through. followed_descriptors = set() # recursion protection all_lookups = normalize_prefetch_lookups(reversed(related_lookups)) while all_lookups: lookup = all_lookups.pop() if lookup.prefetch_to in done_queries: if lookup.queryset is not None: raise ValueError("'%s' lookup was already seen with a different queryset. " "You may need to adjust the ordering of your lookups." % lookup.prefetch_to) continue # Top level, the list of objects to decorate is the result cache # from the primary QuerySet. It won't be for deeper levels. obj_list = model_instances through_attrs = lookup.prefetch_through.split(LOOKUP_SEP) for level, through_attr in enumerate(through_attrs): # Prepare main instances if not obj_list: break prefetch_to = lookup.get_current_prefetch_to(level) if prefetch_to in done_queries: # Skip any prefetching, and any object preparation obj_list = done_queries[prefetch_to] continue # Prepare objects: good_objects = True for obj in obj_list: # Since prefetching can re-use instances, it is possible to have # the same instance multiple times in obj_list, so obj might # already be prepared. if not hasattr(obj, '_prefetched_objects_cache'): try: obj._prefetched_objects_cache = {} except (AttributeError, TypeError): # Must be an immutable object from # values_list(flat=True), for example (TypeError) or # a QuerySet subclass that isn't returning Model # instances (AttributeError), either in Django or a 3rd # party. prefetch_related() doesn't make sense, so quit. good_objects = False break if not good_objects: break # Descend down tree # We assume that objects retrieved are homogeneous (which is the premise # of prefetch_related), so what applies to first object applies to all. first_obj = obj_list[0] to_attr = lookup.get_current_to_attr(level)[0] prefetcher, descriptor, attr_found, is_fetched = get_prefetcher(first_obj, through_attr, to_attr) if not attr_found: raise AttributeError("Cannot find '%s' on %s object, '%s' is an invalid " "parameter to prefetch_related()" % (through_attr, first_obj.__class__.__name__, lookup.prefetch_through)) if level == len(through_attrs) - 1 and prefetcher is None: # Last one, this *must* resolve to something that supports # prefetching, otherwise there is no point adding it and the # developer asking for it has made a mistake. raise ValueError("'%s' does not resolve to an item that supports " "prefetching - this is an invalid parameter to " "prefetch_related()." % lookup.prefetch_through) if prefetcher is not None and not is_fetched: obj_list, additional_lookups = prefetch_one_level(obj_list, prefetcher, lookup, level) # We need to ensure we don't keep adding lookups from the # same relationships to stop infinite recursion. So, if we # are already on an automatically added lookup, don't add # the new lookups from relationships we've seen already. 
if not (prefetch_to in done_queries and lookup in auto_lookups and descriptor in followed_descriptors): done_queries[prefetch_to] = obj_list new_lookups = normalize_prefetch_lookups(reversed(additional_lookups), prefetch_to) auto_lookups.update(new_lookups) all_lookups.extend(new_lookups) followed_descriptors.add(descriptor) else: # Either a singly related object that has already been fetched # (e.g. via select_related), or hopefully some other property # that doesn't support prefetching but needs to be traversed. # We replace the current list of parent objects with the list # of related objects, filtering out empty or missing values so # that we can continue with nullable or reverse relations. new_obj_list = [] for obj in obj_list: if through_attr in getattr(obj, '_prefetched_objects_cache', ()): # If related objects have been prefetched, use the # cache rather than the object's through_attr. new_obj = list(obj._prefetched_objects_cache.get(through_attr)) else: try: new_obj = getattr(obj, through_attr) except exceptions.ObjectDoesNotExist: continue if new_obj is None: continue # We special-case `list` rather than something more generic # like `Iterable` because we don't want to accidentally match # user models that define __iter__. if isinstance(new_obj, list): new_obj_list.extend(new_obj) else: new_obj_list.append(new_obj) obj_list = new_obj_list def get_prefetcher(instance, through_attr, to_attr): """ For the attribute 'through_attr' on the given instance, find an object that has a get_prefetch_queryset(). Return a 4 tuple containing: (the object with get_prefetch_queryset (or None), the descriptor object representing this relationship (or None), a boolean that is False if the attribute was not found at all, a boolean that is True if the attribute has already been fetched) """ prefetcher = None is_fetched = False # For singly related objects, we have to avoid getting the attribute # from the object, as this will trigger the query. So we first try # on the class, in order to get the descriptor object. rel_obj_descriptor = getattr(instance.__class__, through_attr, None) if rel_obj_descriptor is None: attr_found = hasattr(instance, through_attr) else: attr_found = True if rel_obj_descriptor: # singly related object, descriptor object has the # get_prefetch_queryset() method. if hasattr(rel_obj_descriptor, 'get_prefetch_queryset'): prefetcher = rel_obj_descriptor if rel_obj_descriptor.is_cached(instance): is_fetched = True else: # descriptor doesn't support prefetching, so we go ahead and get # the attribute on the instance rather than the class to # support many related managers rel_obj = getattr(instance, through_attr) if hasattr(rel_obj, 'get_prefetch_queryset'): prefetcher = rel_obj if through_attr != to_attr: # Special case cached_property instances because hasattr # triggers attribute computation and assignment. if isinstance(getattr(instance.__class__, to_attr, None), cached_property): is_fetched = to_attr in instance.__dict__ else: is_fetched = hasattr(instance, to_attr) else: is_fetched = through_attr in instance._prefetched_objects_cache return prefetcher, rel_obj_descriptor, attr_found, is_fetched def prefetch_one_level(instances, prefetcher, lookup, level): """ Helper function for prefetch_related_objects(). Run prefetches on all instances using the prefetcher object, assigning results to relevant caches in instance. Return the prefetched objects along with any additional prefetches that must be done due to prefetch_related lookups found from default managers. 
""" # prefetcher must have a method get_prefetch_queryset() which takes a list # of instances, and returns a tuple: # (queryset of instances of self.model that are related to passed in instances, # callable that gets value to be matched for returned instances, # callable that gets value to be matched for passed in instances, # boolean that is True for singly related objects, # cache or field name to assign to, # boolean that is True when the previous argument is a cache name vs a field name). # The 'values to be matched' must be hashable as they will be used # in a dictionary. rel_qs, rel_obj_attr, instance_attr, single, cache_name, is_descriptor = ( prefetcher.get_prefetch_queryset(instances, lookup.get_current_queryset(level))) # We have to handle the possibility that the QuerySet we just got back # contains some prefetch_related lookups. We don't want to trigger the # prefetch_related functionality by evaluating the query. Rather, we need # to merge in the prefetch_related lookups. # Copy the lookups in case it is a Prefetch object which could be reused # later (happens in nested prefetch_related). additional_lookups = [ copy.copy(additional_lookup) for additional_lookup in getattr(rel_qs, '_prefetch_related_lookups', ()) ] if additional_lookups: # Don't need to clone because the manager should have given us a fresh # instance, so we access an internal instead of using public interface # for performance reasons. rel_qs._prefetch_related_lookups = () all_related_objects = list(rel_qs) rel_obj_cache = {} for rel_obj in all_related_objects: rel_attr_val = rel_obj_attr(rel_obj) rel_obj_cache.setdefault(rel_attr_val, []).append(rel_obj) to_attr, as_attr = lookup.get_current_to_attr(level) # Make sure `to_attr` does not conflict with a field. if as_attr and instances: # We assume that objects retrieved are homogeneous (which is the premise # of prefetch_related), so what applies to first object applies to all. model = instances[0].__class__ try: model._meta.get_field(to_attr) except exceptions.FieldDoesNotExist: pass else: msg = 'to_attr={} conflicts with a field on the {} model.' raise ValueError(msg.format(to_attr, model.__name__)) # Whether or not we're prefetching the last part of the lookup. leaf = len(lookup.prefetch_through.split(LOOKUP_SEP)) - 1 == level for obj in instances: instance_attr_val = instance_attr(obj) vals = rel_obj_cache.get(instance_attr_val, []) if single: val = vals[0] if vals else None if as_attr: # A to_attr has been given for the prefetch. setattr(obj, to_attr, val) elif is_descriptor: # cache_name points to a field name in obj. # This field is a descriptor for a related object. setattr(obj, cache_name, val) else: # No to_attr has been given for this prefetch operation and the # cache_name does not point to a descriptor. Store the value of # the field in the object's field cache. obj._state.fields_cache[cache_name] = val else: if as_attr: setattr(obj, to_attr, vals) else: manager = getattr(obj, to_attr) if leaf and lookup.queryset is not None: qs = manager._apply_rel_filters(lookup.queryset) else: qs = manager.get_queryset() qs._result_cache = vals # We don't want the individual qs doing prefetch_related now, # since we have merged this into the current work. qs._prefetch_done = True obj._prefetched_objects_cache[cache_name] = qs return all_related_objects, additional_lookups class RelatedPopulator: """ RelatedPopulator is used for select_related() object instantiation. 
The idea is that each select_related() model will be populated by a different RelatedPopulator instance. The RelatedPopulator instances get klass_info and select (computed in SQLCompiler) plus the used db as input for initialization. That data is used to compute which columns to use, how to instantiate the model, and how to populate the links between the objects. The actual creation of the objects is done in populate() method. This method gets row and from_obj as input and populates the select_related() model instance. """ def __init__(self, klass_info, select, db): self.db = db # Pre-compute needed attributes. The attributes are: # - model_cls: the possibly deferred model class to instantiate # - either: # - cols_start, cols_end: usually the columns in the row are # in the same order model_cls.__init__ expects them, so we # can instantiate by model_cls(*row[cols_start:cols_end]) # - reorder_for_init: When select_related descends to a child # class, then we want to reuse the already selected parent # data. However, in this case the parent data isn't necessarily # in the same order that Model.__init__ expects it to be, so # we have to reorder the parent data. The reorder_for_init # attribute contains a function used to reorder the field data # in the order __init__ expects it. # - pk_idx: the index of the primary key field in the reordered # model data. Used to check if a related object exists at all. # - init_list: the field attnames fetched from the database. For # deferred models this isn't the same as all attnames of the # model's fields. # - related_populators: a list of RelatedPopulator instances if # select_related() descends to related models from this model. # - local_setter, remote_setter: Methods to set cached values on # the object being populated and on the remote object. Usually # these are Field.set_cached_value() methods. select_fields = klass_info['select_fields'] from_parent = klass_info['from_parent'] if not from_parent: self.cols_start = select_fields[0] self.cols_end = select_fields[-1] + 1 self.init_list = [ f[0].target.attname for f in select[self.cols_start:self.cols_end] ] self.reorder_for_init = None else: attname_indexes = {select[idx][0].target.attname: idx for idx in select_fields} model_init_attnames = (f.attname for f in klass_info['model']._meta.concrete_fields) self.init_list = [attname for attname in model_init_attnames if attname in attname_indexes] self.reorder_for_init = operator.itemgetter(*[attname_indexes[attname] for attname in self.init_list]) self.model_cls = klass_info['model'] self.pk_idx = self.init_list.index(self.model_cls._meta.pk.attname) self.related_populators = get_related_populators(klass_info, select, self.db) self.local_setter = klass_info['local_setter'] self.remote_setter = klass_info['remote_setter'] def populate(self, row, from_obj): if self.reorder_for_init: obj_data = self.reorder_for_init(row) else: obj_data = row[self.cols_start:self.cols_end] if obj_data[self.pk_idx] is None: obj = None else: obj = self.model_cls.from_db(self.db, self.init_list, obj_data) for rel_iter in self.related_populators: rel_iter.populate(row, obj) self.local_setter(from_obj, obj) if obj is not None: self.remote_setter(obj, from_obj) def get_related_populators(klass_info, select, db): iterators = [] related_klass_infos = klass_info.get('related_klass_infos', []) for rel_klass_info in related_klass_infos: rel_cls = RelatedPopulator(rel_klass_info, select, db) iterators.append(rel_cls) return iterators
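
# Illustrative usage sketch for prefetch_related_objects() and Prefetch, both
# defined in this module. "Author", "Book", and the reverse relation "books"
# are hypothetical names used only for this example; they are not part of
# this module.
#
#   from django.db.models import Prefetch, prefetch_related_objects
#
#   authors = list(Author.objects.all())
#   prefetch_related_objects(
#       authors,
#       Prefetch(
#           'books',
#           queryset=Book.objects.filter(published=True),
#           to_attr='published_books',
#       ),
#   )
#   # Each author now carries .published_books, assigned by
#   # prefetch_one_level() in a single batched query instead of one query
#   # per instance.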
ed6b468cb29ac46b10a260d37b500ea66d2a264b5e1a80d6d7adf2e6e73c4b05
from django.core.exceptions import ObjectDoesNotExist from django.db.models import signals from django.db.models.aggregates import * # NOQA from django.db.models.aggregates import __all__ as aggregates_all from django.db.models.constraints import * # NOQA from django.db.models.constraints import __all__ as constraints_all from django.db.models.deletion import ( CASCADE, DO_NOTHING, PROTECT, RESTRICT, SET, SET_DEFAULT, SET_NULL, ProtectedError, RestrictedError, ) from django.db.models.enums import * # NOQA from django.db.models.enums import __all__ as enums_all from django.db.models.expressions import ( Case, Exists, Expression, ExpressionList, ExpressionWrapper, F, Func, OrderBy, OuterRef, RowRange, Subquery, Value, ValueRange, When, Window, WindowFrame, ) from django.db.models.fields import * # NOQA from django.db.models.fields import __all__ as fields_all from django.db.models.fields.files import FileField, ImageField from django.db.models.fields.json import JSONField from django.db.models.fields.proxy import OrderWrt from django.db.models.indexes import * # NOQA from django.db.models.indexes import __all__ as indexes_all from django.db.models.lookups import Lookup, Transform from django.db.models.manager import Manager from django.db.models.query import Prefetch, QuerySet, prefetch_related_objects from django.db.models.query_utils import FilteredRelation, Q # Imports that would create circular imports if sorted from django.db.models.base import DEFERRED, Model # isort:skip from django.db.models.fields.related import ( # isort:skip ForeignKey, ForeignObject, OneToOneField, ManyToManyField, ForeignObjectRel, ManyToOneRel, ManyToManyRel, OneToOneRel, ) __all__ = aggregates_all + constraints_all + enums_all + fields_all + indexes_all __all__ += [ 'ObjectDoesNotExist', 'signals', 'CASCADE', 'DO_NOTHING', 'PROTECT', 'RESTRICT', 'SET', 'SET_DEFAULT', 'SET_NULL', 'ProtectedError', 'RestrictedError', 'Case', 'Exists', 'Expression', 'ExpressionList', 'ExpressionWrapper', 'F', 'Func', 'OrderBy', 'OuterRef', 'RowRange', 'Subquery', 'Value', 'ValueRange', 'When', 'Window', 'WindowFrame', 'FileField', 'ImageField', 'JSONField', 'OrderWrt', 'Lookup', 'Transform', 'Manager', 'Prefetch', 'Q', 'QuerySet', 'prefetch_related_objects', 'DEFERRED', 'Model', 'FilteredRelation', 'ForeignKey', 'ForeignObject', 'OneToOneField', 'ManyToManyField', 'ForeignObjectRel', 'ManyToOneRel', 'ManyToManyRel', 'OneToOneRel', ]
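
# Import sketch: application code typically pulls the public names
# re-exported above rather than reaching into the submodules, e.g.:
#
#   from django.db.models import Model, ForeignKey, Prefetch, Q
#
# All four names are guaranteed by the __all__ list assembled above.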
a4a4afd88843c1ff510dee02ae7197883e059fbc45b083ae77863c392f746e71
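# The Options class below backs the `_meta` attribute of every model. The
# DEFAULT_NAMES tuple further down lists the attributes a user may set on an
# inner Meta class. A hypothetical model illustrating two of them (the model
# name is for illustration only):
#
#   class Book(models.Model):
#       title = models.CharField(max_length=100)
#
#       class Meta:
#           ordering = ['title']
#           db_table = 'shop_book'
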
import bisect import copy import inspect from collections import defaultdict from django.apps import apps from django.conf import settings from django.core.exceptions import FieldDoesNotExist from django.db import connections from django.db.models import AutoField, Manager, OrderWrt, UniqueConstraint from django.db.models.query_utils import PathInfo from django.utils.datastructures import ImmutableList, OrderedSet from django.utils.functional import cached_property from django.utils.text import camel_case_to_spaces, format_lazy from django.utils.translation import override PROXY_PARENTS = object() EMPTY_RELATION_TREE = () IMMUTABLE_WARNING = ( "The return type of '%s' should never be mutated. If you want to manipulate this list " "for your own use, make a copy first." ) DEFAULT_NAMES = ( 'verbose_name', 'verbose_name_plural', 'db_table', 'ordering', 'unique_together', 'permissions', 'get_latest_by', 'order_with_respect_to', 'app_label', 'db_tablespace', 'abstract', 'managed', 'proxy', 'swappable', 'auto_created', 'index_together', 'apps', 'default_permissions', 'select_on_save', 'default_related_name', 'required_db_features', 'required_db_vendor', 'base_manager_name', 'default_manager_name', 'indexes', 'constraints', ) def normalize_together(option_together): """ option_together can be either a tuple of tuples, or a single tuple of two strings. Normalize it to a tuple of tuples, so that calling code can uniformly expect that. """ try: if not option_together: return () if not isinstance(option_together, (tuple, list)): raise TypeError first_element = option_together[0] if not isinstance(first_element, (tuple, list)): option_together = (option_together,) # Normalize everything to tuples return tuple(tuple(ot) for ot in option_together) except TypeError: # If the value of option_together isn't valid, return it # verbatim; this will be picked up by the check framework later. return option_together def make_immutable_fields_list(name, data): return ImmutableList(data, warning=IMMUTABLE_WARNING % name) class Options: FORWARD_PROPERTIES = { 'fields', 'many_to_many', 'concrete_fields', 'local_concrete_fields', '_forward_fields_map', 'managers', 'managers_map', 'base_manager', 'default_manager', } REVERSE_PROPERTIES = {'related_objects', 'fields_map', '_relation_tree'} default_apps = apps def __init__(self, meta, app_label=None): self._get_fields_cache = {} self.local_fields = [] self.local_many_to_many = [] self.private_fields = [] self.local_managers = [] self.base_manager_name = None self.default_manager_name = None self.model_name = None self.verbose_name = None self.verbose_name_plural = None self.db_table = '' self.ordering = [] self._ordering_clash = False self.indexes = [] self.constraints = [] self.unique_together = [] self.index_together = [] self.select_on_save = False self.default_permissions = ('add', 'change', 'delete', 'view') self.permissions = [] self.object_name = None self.app_label = app_label self.get_latest_by = None self.order_with_respect_to = None self.db_tablespace = settings.DEFAULT_TABLESPACE self.required_db_features = [] self.required_db_vendor = None self.meta = meta self.pk = None self.auto_field = None self.abstract = False self.managed = True self.proxy = False # For any class that is a proxy (including automatically created # classes for deferred object loading), proxy_for_model tells us # which class this model is proxying. Note that proxy_for_model # can create a chain of proxy models. For non-proxy models, the # variable is always None. 
        self.proxy_for_model = None
        # For any non-abstract class, the concrete class is the model
        # in the end of the proxy_for_model chain. In particular, for
        # concrete models, the concrete_model is always the class itself.
        self.concrete_model = None
        self.swappable = None
        self.parents = {}
        self.auto_created = False

        # List of all lookups defined in ForeignKey 'limit_choices_to' options
        # from *other* models. Needed for some admin checks. Internal use only.
        self.related_fkey_lookups = []

        # A custom app registry to use, if you're making a separate model set.
        self.apps = self.default_apps

        self.default_related_name = None

    @property
    def label(self):
        return '%s.%s' % (self.app_label, self.object_name)

    @property
    def label_lower(self):
        return '%s.%s' % (self.app_label, self.model_name)

    @property
    def app_config(self):
        # Don't go through get_app_config to avoid triggering imports.
        return self.apps.app_configs.get(self.app_label)

    @property
    def installed(self):
        return self.app_config is not None

    def contribute_to_class(self, cls, name):
        from django.db import connection
        from django.db.backends.utils import truncate_name

        cls._meta = self
        self.model = cls
        # First, construct the default values for these options.
        self.object_name = cls.__name__
        self.model_name = self.object_name.lower()
        self.verbose_name = camel_case_to_spaces(self.object_name)

        # Store the original user-defined values for each option,
        # for use when serializing the model definition.
        self.original_attrs = {}

        # Next, apply any overridden values from 'class Meta'.
        if self.meta:
            meta_attrs = self.meta.__dict__.copy()
            for name in self.meta.__dict__:
                # Ignore any private attributes that Django doesn't care about.
                # NOTE: We can't modify a dictionary's contents while looping
                # over it, so we loop over the *original* dictionary instead.
                if name.startswith('_'):
                    del meta_attrs[name]
            for attr_name in DEFAULT_NAMES:
                if attr_name in meta_attrs:
                    setattr(self, attr_name, meta_attrs.pop(attr_name))
                    self.original_attrs[attr_name] = getattr(self, attr_name)
                elif hasattr(self.meta, attr_name):
                    setattr(self, attr_name, getattr(self.meta, attr_name))
                    self.original_attrs[attr_name] = getattr(self, attr_name)

            self.unique_together = normalize_together(self.unique_together)
            self.index_together = normalize_together(self.index_together)
            # App label/class name interpolation for names of constraints and
            # indexes.
            if not getattr(cls._meta, 'abstract', False):
                for attr_name in {'constraints', 'indexes'}:
                    objs = getattr(self, attr_name, [])
                    setattr(self, attr_name, self._format_names_with_class(cls, objs))

            # verbose_name_plural is a special case because it uses a 's'
            # by default.
            if self.verbose_name_plural is None:
                self.verbose_name_plural = format_lazy('{}s', self.verbose_name)

            # order_with_respect_to and ordering are mutually exclusive.
            self._ordering_clash = bool(self.ordering and self.order_with_respect_to)

            # Any leftover attributes must be invalid.
            if meta_attrs != {}:
                raise TypeError("'class Meta' got invalid attribute(s): %s" % ','.join(meta_attrs))
        else:
            self.verbose_name_plural = format_lazy('{}s', self.verbose_name)
        del self.meta

        # If the db_table wasn't provided, use the app_label + model_name.
if not self.db_table: self.db_table = "%s_%s" % (self.app_label, self.model_name) self.db_table = truncate_name(self.db_table, connection.ops.max_name_length()) def _format_names_with_class(self, cls, objs): """App label/class name interpolation for object names.""" new_objs = [] for obj in objs: obj = obj.clone() obj.name = obj.name % { 'app_label': cls._meta.app_label.lower(), 'class': cls.__name__.lower(), } new_objs.append(obj) return new_objs def _prepare(self, model): if self.order_with_respect_to: # The app registry will not be ready at this point, so we cannot # use get_field(). query = self.order_with_respect_to try: self.order_with_respect_to = next( f for f in self._get_fields(reverse=False) if f.name == query or f.attname == query ) except StopIteration: raise FieldDoesNotExist("%s has no field named '%s'" % (self.object_name, query)) self.ordering = ('_order',) if not any(isinstance(field, OrderWrt) for field in model._meta.local_fields): model.add_to_class('_order', OrderWrt()) else: self.order_with_respect_to = None if self.pk is None: if self.parents: # Promote the first parent link in lieu of adding yet another # field. field = next(iter(self.parents.values())) # Look for a local field with the same name as the # first parent link. If a local field has already been # created, use it instead of promoting the parent already_created = [fld for fld in self.local_fields if fld.name == field.name] if already_created: field = already_created[0] field.primary_key = True self.setup_pk(field) else: auto = AutoField(verbose_name='ID', primary_key=True, auto_created=True) model.add_to_class('id', auto) def add_manager(self, manager): self.local_managers.append(manager) self._expire_cache() def add_field(self, field, private=False): # Insert the given field in the order in which it was created, using # the "creation_counter" attribute of the field. # Move many-to-many related fields from self.fields into # self.many_to_many. if private: self.private_fields.append(field) elif field.is_relation and field.many_to_many: bisect.insort(self.local_many_to_many, field) else: bisect.insort(self.local_fields, field) self.setup_pk(field) # If the field being added is a relation to another known field, # expire the cache on this field and the forward cache on the field # being referenced, because there will be new relationships in the # cache. Otherwise, expire the cache of references *to* this field. # The mechanism for getting at the related model is slightly odd - # ideally, we'd just ask for field.related_model. However, related_model # is a cached property, and all the models haven't been loaded yet, so # we need to make sure we don't cache a string reference. if field.is_relation and hasattr(field.remote_field, 'model') and field.remote_field.model: try: field.remote_field.model._meta._expire_cache(forward=False) except AttributeError: pass self._expire_cache() else: self._expire_cache(reverse=False) def setup_pk(self, field): if not self.pk and field.primary_key: self.pk = field field.serialize = False def setup_proxy(self, target): """ Do the internal setup so that the current model is a proxy for "target". """ self.pk = target._meta.pk self.proxy_for_model = target self.db_table = target._meta.db_table def __repr__(self): return '<Options for %s>' % self.object_name def __str__(self): return self.label_lower def can_migrate(self, connection): """ Return True if the model can/should be migrated on the `connection`. `connection` can be either a real connection or a connection alias. 
""" if self.proxy or self.swapped or not self.managed: return False if isinstance(connection, str): connection = connections[connection] if self.required_db_vendor: return self.required_db_vendor == connection.vendor if self.required_db_features: return all(getattr(connection.features, feat, False) for feat in self.required_db_features) return True @property def verbose_name_raw(self): """Return the untranslated verbose name.""" with override(None): return str(self.verbose_name) @property def swapped(self): """ Has this model been swapped out for another? If so, return the model name of the replacement; otherwise, return None. For historical reasons, model name lookups using get_model() are case insensitive, so we make sure we are case insensitive here. """ if self.swappable: swapped_for = getattr(settings, self.swappable, None) if swapped_for: try: swapped_label, swapped_object = swapped_for.split('.') except ValueError: # setting not in the format app_label.model_name # raising ImproperlyConfigured here causes problems with # test cleanup code - instead it is raised in get_user_model # or as part of validation. return swapped_for if '%s.%s' % (swapped_label, swapped_object.lower()) != self.label_lower: return swapped_for return None @cached_property def managers(self): managers = [] seen_managers = set() bases = (b for b in self.model.mro() if hasattr(b, '_meta')) for depth, base in enumerate(bases): for manager in base._meta.local_managers: if manager.name in seen_managers: continue manager = copy.copy(manager) manager.model = self.model seen_managers.add(manager.name) managers.append((depth, manager.creation_counter, manager)) return make_immutable_fields_list( "managers", (m[2] for m in sorted(managers)), ) @cached_property def managers_map(self): return {manager.name: manager for manager in self.managers} @cached_property def base_manager(self): base_manager_name = self.base_manager_name if not base_manager_name: # Get the first parent's base_manager_name if there's one. for parent in self.model.mro()[1:]: if hasattr(parent, '_meta'): if parent._base_manager.name != '_base_manager': base_manager_name = parent._base_manager.name break if base_manager_name: try: return self.managers_map[base_manager_name] except KeyError: raise ValueError( "%s has no manager named %r" % ( self.object_name, base_manager_name, ) ) manager = Manager() manager.name = '_base_manager' manager.model = self.model manager.auto_created = True return manager @cached_property def default_manager(self): default_manager_name = self.default_manager_name if not default_manager_name and not self.local_managers: # Get the first parent's default_manager_name if there's one. for parent in self.model.mro()[1:]: if hasattr(parent, '_meta'): default_manager_name = parent._meta.default_manager_name break if default_manager_name: try: return self.managers_map[default_manager_name] except KeyError: raise ValueError( "%s has no manager named %r" % ( self.object_name, default_manager_name, ) ) if self.managers: return self.managers[0] @cached_property def fields(self): """ Return a list of all forward fields on the model and its parents, excluding ManyToManyFields. Private API intended only to be used by Django itself; get_fields() combined with filtering of field properties is the public API for obtaining this field list. """ # For legacy reasons, the fields property should only contain forward # fields that are not private or with a m2m cardinality. Therefore we # pass these three filters as filters to the generator. 
# The third lambda is a longwinded way of checking f.related_model - we don't # use that property directly because related_model is a cached property, # and all the models may not have been loaded yet; we don't want to cache # the string reference to the related_model. def is_not_an_m2m_field(f): return not (f.is_relation and f.many_to_many) def is_not_a_generic_relation(f): return not (f.is_relation and f.one_to_many) def is_not_a_generic_foreign_key(f): return not ( f.is_relation and f.many_to_one and not (hasattr(f.remote_field, 'model') and f.remote_field.model) ) return make_immutable_fields_list( "fields", (f for f in self._get_fields(reverse=False) if is_not_an_m2m_field(f) and is_not_a_generic_relation(f) and is_not_a_generic_foreign_key(f)) ) @cached_property def concrete_fields(self): """ Return a list of all concrete fields on the model and its parents. Private API intended only to be used by Django itself; get_fields() combined with filtering of field properties is the public API for obtaining this field list. """ return make_immutable_fields_list( "concrete_fields", (f for f in self.fields if f.concrete) ) @cached_property def local_concrete_fields(self): """ Return a list of all concrete fields on the model. Private API intended only to be used by Django itself; get_fields() combined with filtering of field properties is the public API for obtaining this field list. """ return make_immutable_fields_list( "local_concrete_fields", (f for f in self.local_fields if f.concrete) ) @cached_property def many_to_many(self): """ Return a list of all many to many fields on the model and its parents. Private API intended only to be used by Django itself; get_fields() combined with filtering of field properties is the public API for obtaining this list. """ return make_immutable_fields_list( "many_to_many", (f for f in self._get_fields(reverse=False) if f.is_relation and f.many_to_many) ) @cached_property def related_objects(self): """ Return all related objects pointing to the current model. The related objects can come from a one-to-one, one-to-many, or many-to-many field relation type. Private API intended only to be used by Django itself; get_fields() combined with filtering of field properties is the public API for obtaining this field list. """ all_related_fields = self._get_fields(forward=False, reverse=True, include_hidden=True) return make_immutable_fields_list( "related_objects", (obj for obj in all_related_fields if not obj.hidden or obj.field.many_to_many) ) @cached_property def _forward_fields_map(self): res = {} fields = self._get_fields(reverse=False) for field in fields: res[field.name] = field # Due to the way Django's internals work, get_field() should also # be able to fetch a field by attname. In the case of a concrete # field with relation, includes the *_id name too try: res[field.attname] = field except AttributeError: pass return res @cached_property def fields_map(self): res = {} fields = self._get_fields(forward=False, include_hidden=True) for field in fields: res[field.name] = field # Due to the way Django's internals work, get_field() should also # be able to fetch a field by attname. In the case of a concrete # field with relation, includes the *_id name too try: res[field.attname] = field except AttributeError: pass return res def get_field(self, field_name): """ Return a field instance given the name of a forward or reverse field. 
""" try: # In order to avoid premature loading of the relation tree # (expensive) we prefer checking if the field is a forward field. return self._forward_fields_map[field_name] except KeyError: # If the app registry is not ready, reverse fields are # unavailable, therefore we throw a FieldDoesNotExist exception. if not self.apps.models_ready: raise FieldDoesNotExist( "%s has no field named '%s'. The app cache isn't ready yet, " "so if this is an auto-created related field, it won't " "be available yet." % (self.object_name, field_name) ) try: # Retrieve field instance by name from cached or just-computed # field map. return self.fields_map[field_name] except KeyError: raise FieldDoesNotExist("%s has no field named '%s'" % (self.object_name, field_name)) def get_base_chain(self, model): """ Return a list of parent classes leading to `model` (ordered from closest to most distant ancestor). This has to handle the case where `model` is a grandparent or even more distant relation. """ if not self.parents: return [] if model in self.parents: return [model] for parent in self.parents: res = parent._meta.get_base_chain(model) if res: res.insert(0, parent) return res return [] def get_parent_list(self): """ Return all the ancestors of this model as a list ordered by MRO. Useful for determining if something is an ancestor, regardless of lineage. """ result = OrderedSet(self.parents) for parent in self.parents: for ancestor in parent._meta.get_parent_list(): result.add(ancestor) return list(result) def get_ancestor_link(self, ancestor): """ Return the field on the current model which points to the given "ancestor". This is possible an indirect link (a pointer to a parent model, which points, eventually, to the ancestor). Used when constructing table joins for model inheritance. Return None if the model isn't an ancestor of this one. """ if ancestor in self.parents: return self.parents[ancestor] for parent in self.parents: # Tries to get a link field from the immediate parent parent_link = parent._meta.get_ancestor_link(ancestor) if parent_link: # In case of a proxied model, the first link # of the chain to the ancestor is that parent # links return self.parents[parent] or parent_link def get_path_to_parent(self, parent): """ Return a list of PathInfos containing the path from the current model to the parent model, or an empty list if parent is not a parent of the current model. """ if self.model is parent: return [] # Skip the chain of proxy to the concrete proxied model. proxied_model = self.concrete_model path = [] opts = self for int_model in self.get_base_chain(parent): if int_model is proxied_model: opts = int_model._meta else: final_field = opts.parents[int_model] targets = (final_field.remote_field.get_related_field(),) opts = int_model._meta path.append(PathInfo( from_opts=final_field.model._meta, to_opts=opts, target_fields=targets, join_field=final_field, m2m=False, direct=True, filtered_relation=None, )) return path def get_path_from_parent(self, parent): """ Return a list of PathInfos containing the path from the parent model to the current model, or an empty list if parent is not a parent of the current model. """ if self.model is parent: return [] model = self.concrete_model # Get a reversed base chain including both the current and parent # models. chain = model._meta.get_base_chain(parent) chain.reverse() chain.append(model) # Construct a list of the PathInfos between models in chain. 
path = [] for i, ancestor in enumerate(chain[:-1]): child = chain[i + 1] link = child._meta.get_ancestor_link(ancestor) path.extend(link.get_reverse_path_info()) return path def _populate_directed_relation_graph(self): """ This method is used by each model to find its reverse objects. As this method is very expensive and is accessed frequently (it looks up every field in a model, in every app), it is computed on first access and then is set as a property on every model. """ related_objects_graph = defaultdict(list) all_models = self.apps.get_models(include_auto_created=True) for model in all_models: opts = model._meta # Abstract model's fields are copied to child models, hence we will # see the fields from the child models. if opts.abstract: continue fields_with_relations = ( f for f in opts._get_fields(reverse=False, include_parents=False) if f.is_relation and f.related_model is not None ) for f in fields_with_relations: if not isinstance(f.remote_field.model, str): remote_label = f.remote_field.model._meta.concrete_model._meta.label related_objects_graph[remote_label].append(f) for model in all_models: # Set the relation_tree using the internal __dict__. In this way # we avoid calling the cached property. In attribute lookup, # __dict__ takes precedence over a data descriptor (such as # @cached_property). This means that the _meta._relation_tree is # only called if related_objects is not in __dict__. related_objects = related_objects_graph[model._meta.concrete_model._meta.label] model._meta.__dict__['_relation_tree'] = related_objects # It seems it is possible that self is not in all_models, so guard # against that with default for get(). return self.__dict__.get('_relation_tree', EMPTY_RELATION_TREE) @cached_property def _relation_tree(self): return self._populate_directed_relation_graph() def _expire_cache(self, forward=True, reverse=True): # This method is usually called by apps.cache_clear(), when the # registry is finalized, or when a new field is added. if forward: for cache_key in self.FORWARD_PROPERTIES: if cache_key in self.__dict__: delattr(self, cache_key) if reverse and not self.abstract: for cache_key in self.REVERSE_PROPERTIES: if cache_key in self.__dict__: delattr(self, cache_key) self._get_fields_cache = {} def get_fields(self, include_parents=True, include_hidden=False): """ Return a list of fields associated to the model. By default, include forward and reverse fields, fields derived from inheritance, but not hidden fields. The returned fields can be changed using the parameters: - include_parents: include fields derived from inheritance - include_hidden: include fields that have a related_name that starts with a "+" """ if include_parents is False: include_parents = PROXY_PARENTS return self._get_fields(include_parents=include_parents, include_hidden=include_hidden) def _get_fields(self, forward=True, reverse=True, include_parents=True, include_hidden=False, seen_models=None): """ Internal helper function to return fields of the model. * If forward=True, then fields defined on this model are returned. * If reverse=True, then relations pointing to this model are returned. * If include_hidden=True, then fields with is_hidden=True are returned. * The include_parents argument toggles if fields from parent models should be included. It has three values: True, False, and PROXY_PARENTS. When set to PROXY_PARENTS, the call will return all fields defined for the current model or any of its parents in the parent chain to the model's concrete model. 
""" if include_parents not in (True, False, PROXY_PARENTS): raise TypeError("Invalid argument for include_parents: %s" % (include_parents,)) # This helper function is used to allow recursion in ``get_fields()`` # implementation and to provide a fast way for Django's internals to # access specific subsets of fields. # We must keep track of which models we have already seen. Otherwise we # could include the same field multiple times from different models. topmost_call = seen_models is None if topmost_call: seen_models = set() seen_models.add(self.model) # Creates a cache key composed of all arguments cache_key = (forward, reverse, include_parents, include_hidden, topmost_call) try: # In order to avoid list manipulation. Always return a shallow copy # of the results. return self._get_fields_cache[cache_key] except KeyError: pass fields = [] # Recursively call _get_fields() on each parent, with the same # options provided in this call. if include_parents is not False: for parent in self.parents: # In diamond inheritance it is possible that we see the same # model from two different routes. In that case, avoid adding # fields from the same parent again. if parent in seen_models: continue if (parent._meta.concrete_model != self.concrete_model and include_parents == PROXY_PARENTS): continue for obj in parent._meta._get_fields( forward=forward, reverse=reverse, include_parents=include_parents, include_hidden=include_hidden, seen_models=seen_models): if not getattr(obj, 'parent_link', False) or obj.model == self.concrete_model: fields.append(obj) if reverse and not self.proxy: # Tree is computed once and cached until the app cache is expired. # It is composed of a list of fields pointing to the current model # from other models. all_fields = self._relation_tree for field in all_fields: # If hidden fields should be included or the relation is not # intentionally hidden, add to the fields dict. if include_hidden or not field.remote_field.hidden: fields.append(field.remote_field) if forward: fields += self.local_fields fields += self.local_many_to_many # Private fields are recopied to each child model, and they get a # different model as field.model in each child. Hence we have to # add the private fields separately from the topmost call. If we # did this recursively similar to local_fields, we would get field # instances with field.model != self.model. if topmost_call: fields += self.private_fields # In order to avoid list manipulation. Always # return a shallow copy of the results fields = make_immutable_fields_list("get_fields()", fields) # Store result into cache for later access self._get_fields_cache[cache_key] = fields return fields @cached_property def total_unique_constraints(self): """ Return a list of total unique constraints. Useful for determining set of fields guaranteed to be unique for all rows. """ return [ constraint for constraint in self.constraints if isinstance(constraint, UniqueConstraint) and constraint.condition is None ] @cached_property def _property_names(self): """Return a set of the names of the properties defined on the model.""" names = [] for name in dir(self.model): attr = inspect.getattr_static(self.model, name) if isinstance(attr, property): names.append(name) return frozenset(names) @cached_property def db_returning_fields(self): """ Private API intended only to be used by Django itself. Fields to be returned after a database insert. 
""" return [ field for field in self._get_fields(forward=True, reverse=False, include_parents=PROXY_PARENTS) if getattr(field, 'db_returning', False) ]
f5f987b72997ac24a7cb744869169106bfab8b5f320c5fbb0bdcc12edd9ee086
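# ModelBase below is the metaclass that turns a class statement such as
#
#   class Book(models.Model):          # hypothetical model, for illustration
#       title = models.CharField(max_length=100)
#
# into a fully wired model class: it attaches an Options instance as
# Book._meta, adds the DoesNotExist/MultipleObjectsReturned exceptions,
# contributes each field via contribute_to_class(), and registers the
# finished class with the app registry.
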
import copy import inspect import warnings from functools import partialmethod from itertools import chain from django.apps import apps from django.conf import settings from django.core import checks from django.core.exceptions import ( NON_FIELD_ERRORS, FieldDoesNotExist, FieldError, MultipleObjectsReturned, ObjectDoesNotExist, ValidationError, ) from django.db import ( DEFAULT_DB_ALIAS, DJANGO_VERSION_PICKLE_KEY, DatabaseError, connection, connections, router, transaction, ) from django.db.models import ( NOT_PROVIDED, ExpressionWrapper, IntegerField, Max, Value, ) from django.db.models.constants import LOOKUP_SEP from django.db.models.constraints import CheckConstraint, UniqueConstraint from django.db.models.deletion import CASCADE, Collector from django.db.models.fields.related import ( ForeignObjectRel, OneToOneField, lazy_related_operation, resolve_relation, ) from django.db.models.functions import Coalesce from django.db.models.manager import Manager from django.db.models.options import Options from django.db.models.query import Q from django.db.models.signals import ( class_prepared, post_init, post_save, pre_init, pre_save, ) from django.db.models.utils import make_model_tuple from django.utils.encoding import force_str from django.utils.hashable import make_hashable from django.utils.text import capfirst, get_text_list from django.utils.translation import gettext_lazy as _ from django.utils.version import get_version class Deferred: def __repr__(self): return '<Deferred field>' def __str__(self): return '<Deferred field>' DEFERRED = Deferred() def subclass_exception(name, bases, module, attached_to): """ Create exception subclass. Used by ModelBase below. The exception is created in a way that allows it to be pickled, assuming that the returned exception class will be added as an attribute to the 'attached_to' class. """ return type(name, bases, { '__module__': module, '__qualname__': '%s.%s' % (attached_to.__qualname__, name), }) def _has_contribute_to_class(value): # Only call contribute_to_class() if it's bound. return not inspect.isclass(value) and hasattr(value, 'contribute_to_class') class ModelBase(type): """Metaclass for all models.""" def __new__(cls, name, bases, attrs, **kwargs): super_new = super().__new__ # Also ensure initialization is only performed for subclasses of Model # (excluding Model class itself). parents = [b for b in bases if isinstance(b, ModelBase)] if not parents: return super_new(cls, name, bases, attrs) # Create the class. module = attrs.pop('__module__') new_attrs = {'__module__': module} classcell = attrs.pop('__classcell__', None) if classcell is not None: new_attrs['__classcell__'] = classcell attr_meta = attrs.pop('Meta', None) # Pass all attrs without a (Django-specific) contribute_to_class() # method to type.__new__() so that they're properly initialized # (i.e. __set_name__()). contributable_attrs = {} for obj_name, obj in list(attrs.items()): if _has_contribute_to_class(obj): contributable_attrs[obj_name] = obj else: new_attrs[obj_name] = obj new_class = super_new(cls, name, bases, new_attrs, **kwargs) abstract = getattr(attr_meta, 'abstract', False) meta = attr_meta or getattr(new_class, 'Meta', None) base_meta = getattr(new_class, '_meta', None) app_label = None # Look for an application configuration to attach the model to. 
app_config = apps.get_containing_app_config(module) if getattr(meta, 'app_label', None) is None: if app_config is None: if not abstract: raise RuntimeError( "Model class %s.%s doesn't declare an explicit " "app_label and isn't in an application in " "INSTALLED_APPS." % (module, name) ) else: app_label = app_config.label new_class.add_to_class('_meta', Options(meta, app_label)) if not abstract: new_class.add_to_class( 'DoesNotExist', subclass_exception( 'DoesNotExist', tuple( x.DoesNotExist for x in parents if hasattr(x, '_meta') and not x._meta.abstract ) or (ObjectDoesNotExist,), module, attached_to=new_class)) new_class.add_to_class( 'MultipleObjectsReturned', subclass_exception( 'MultipleObjectsReturned', tuple( x.MultipleObjectsReturned for x in parents if hasattr(x, '_meta') and not x._meta.abstract ) or (MultipleObjectsReturned,), module, attached_to=new_class)) if base_meta and not base_meta.abstract: # Non-abstract child classes inherit some attributes from their # non-abstract parent (unless an ABC comes before it in the # method resolution order). if not hasattr(meta, 'ordering'): new_class._meta.ordering = base_meta.ordering if not hasattr(meta, 'get_latest_by'): new_class._meta.get_latest_by = base_meta.get_latest_by is_proxy = new_class._meta.proxy # If the model is a proxy, ensure that the base class # hasn't been swapped out. if is_proxy and base_meta and base_meta.swapped: raise TypeError("%s cannot proxy the swapped model '%s'." % (name, base_meta.swapped)) # Add remaining attributes (those with a contribute_to_class() method) # to the class. for obj_name, obj in contributable_attrs.items(): new_class.add_to_class(obj_name, obj) # All the fields of any type declared on this model new_fields = chain( new_class._meta.local_fields, new_class._meta.local_many_to_many, new_class._meta.private_fields ) field_names = {f.name for f in new_fields} # Basic setup for proxy models. if is_proxy: base = None for parent in [kls for kls in parents if hasattr(kls, '_meta')]: if parent._meta.abstract: if parent._meta.fields: raise TypeError( "Abstract base class containing model fields not " "permitted for proxy model '%s'." % name ) else: continue if base is None: base = parent elif parent._meta.concrete_model is not base._meta.concrete_model: raise TypeError("Proxy model '%s' has more than one non-abstract model base class." % name) if base is None: raise TypeError("Proxy model '%s' has no non-abstract model base class." % name) new_class._meta.setup_proxy(base) new_class._meta.concrete_model = base._meta.concrete_model else: new_class._meta.concrete_model = new_class # Collect the parent links for multi-table inheritance. parent_links = {} for base in reversed([new_class] + parents): # Conceptually equivalent to `if base is Model`. if not hasattr(base, '_meta'): continue # Skip concrete parent classes. if base != new_class and not base._meta.abstract: continue # Locate OneToOneField instances. for field in base._meta.local_fields: if isinstance(field, OneToOneField) and field.remote_field.parent_link: related = resolve_relation(new_class, field.remote_field.model) parent_links[make_model_tuple(related)] = field # Track fields inherited from base models. inherited_attributes = set() # Do the appropriate setup for any model parents. for base in new_class.mro(): if base not in parents or not hasattr(base, '_meta'): # Things without _meta aren't functional models, so they're # uninteresting parents. 
inherited_attributes.update(base.__dict__) continue parent_fields = base._meta.local_fields + base._meta.local_many_to_many if not base._meta.abstract: # Check for clashes between locally declared fields and those # on the base classes. for field in parent_fields: if field.name in field_names: raise FieldError( 'Local field %r in class %r clashes with field of ' 'the same name from base class %r.' % ( field.name, name, base.__name__, ) ) else: inherited_attributes.add(field.name) # Concrete classes... base = base._meta.concrete_model base_key = make_model_tuple(base) if base_key in parent_links: field = parent_links[base_key] elif not is_proxy: attr_name = '%s_ptr' % base._meta.model_name field = OneToOneField( base, on_delete=CASCADE, name=attr_name, auto_created=True, parent_link=True, ) if attr_name in field_names: raise FieldError( "Auto-generated field '%s' in class %r for " "parent_link to base class %r clashes with " "declared field of the same name." % ( attr_name, name, base.__name__, ) ) # Only add the ptr field if it's not already present; # e.g. migrations will already have it specified if not hasattr(new_class, attr_name): new_class.add_to_class(attr_name, field) else: field = None new_class._meta.parents[base] = field else: base_parents = base._meta.parents.copy() # Add fields from abstract base class if it wasn't overridden. for field in parent_fields: if (field.name not in field_names and field.name not in new_class.__dict__ and field.name not in inherited_attributes): new_field = copy.deepcopy(field) new_class.add_to_class(field.name, new_field) # Replace parent links defined on this base by the new # field. It will be appropriately resolved if required. if field.one_to_one: for parent, parent_link in base_parents.items(): if field == parent_link: base_parents[parent] = new_field # Pass any non-abstract parent classes onto child. new_class._meta.parents.update(base_parents) # Inherit private fields (like GenericForeignKey) from the parent # class for field in base._meta.private_fields: if field.name in field_names: if not base._meta.abstract: raise FieldError( 'Local field %r in class %r clashes with field of ' 'the same name from base class %r.' % ( field.name, name, base.__name__, ) ) else: field = copy.deepcopy(field) if not base._meta.abstract: field.mti_inherited = True new_class.add_to_class(field.name, field) # Copy indexes so that index names are unique when models extend an # abstract model. new_class._meta.indexes = [copy.deepcopy(idx) for idx in new_class._meta.indexes] if abstract: # Abstract base models can't be instantiated and don't appear in # the list of models for an app. We do the final setup for them a # little differently from normal models. attr_meta.abstract = False new_class.Meta = attr_meta return new_class new_class._prepare() new_class._meta.apps.register_model(new_class._meta.app_label, new_class) return new_class def add_to_class(cls, name, value): if _has_contribute_to_class(value): value.contribute_to_class(cls, name) else: setattr(cls, name, value) def _prepare(cls): """Create some methods once self._meta has been populated.""" opts = cls._meta opts._prepare(cls) if opts.order_with_respect_to: cls.get_next_in_order = partialmethod(cls._get_next_or_previous_in_order, is_next=True) cls.get_previous_in_order = partialmethod(cls._get_next_or_previous_in_order, is_next=False) # Defer creating accessors on the foreign class until it has been # created and registered. 
If remote_field is None, we're ordering # with respect to a GenericForeignKey and don't know what the # foreign class is - we'll add those accessors later in # contribute_to_class(). if opts.order_with_respect_to.remote_field: wrt = opts.order_with_respect_to remote = wrt.remote_field.model lazy_related_operation(make_foreign_order_accessors, cls, remote) # Give the class a docstring -- its definition. if cls.__doc__ is None: cls.__doc__ = "%s(%s)" % (cls.__name__, ", ".join(f.name for f in opts.fields)) get_absolute_url_override = settings.ABSOLUTE_URL_OVERRIDES.get(opts.label_lower) if get_absolute_url_override: setattr(cls, 'get_absolute_url', get_absolute_url_override) if not opts.managers: if any(f.name == 'objects' for f in opts.fields): raise ValueError( "Model %s must specify a custom Manager, because it has a " "field named 'objects'." % cls.__name__ ) manager = Manager() manager.auto_created = True cls.add_to_class('objects', manager) # Set the name of _meta.indexes. This can't be done in # Options.contribute_to_class() because fields haven't been added to # the model at that point. for index in cls._meta.indexes: if not index.name: index.set_name_with_model(cls) class_prepared.send(sender=cls) @property def _base_manager(cls): return cls._meta.base_manager @property def _default_manager(cls): return cls._meta.default_manager class ModelStateFieldsCacheDescriptor: def __get__(self, instance, cls=None): if instance is None: return self res = instance.fields_cache = {} return res class ModelState: """Store model instance state.""" db = None # If true, uniqueness validation checks will consider this a new, unsaved # object. Necessary for correct validation of new instances of objects with # explicit (non-auto) PKs. This impacts validation only; it has no effect # on the actual save. adding = True fields_cache = ModelStateFieldsCacheDescriptor() class Model(metaclass=ModelBase): def __init__(self, *args, **kwargs): # Alias some things as locals to avoid repeat global lookups cls = self.__class__ opts = self._meta _setattr = setattr _DEFERRED = DEFERRED pre_init.send(sender=cls, args=args, kwargs=kwargs) # Set up the storage for instance state self._state = ModelState() # There is a rather weird disparity here; if kwargs, it's set, then args # overrides it. It should be one or the other; don't duplicate the work # The reason for the kwargs check is that standard iterator passes in by # args, and instantiation for iteration is 33% faster. if len(args) > len(opts.concrete_fields): # Daft, but matches old exception sans the err msg. raise IndexError("Number of args exceeds number of fields") if not kwargs: fields_iter = iter(opts.concrete_fields) # The ordering of the zip calls matter - zip throws StopIteration # when an iter throws it. So if the first iter throws it, the second # is *not* consumed. We rely on this, so don't change the order # without changing the logic. for val, field in zip(args, fields_iter): if val is _DEFERRED: continue _setattr(self, field.attname, val) else: # Slower, kwargs-ready version. fields_iter = iter(opts.fields) for val, field in zip(args, fields_iter): if val is _DEFERRED: continue _setattr(self, field.attname, val) kwargs.pop(field.name, None) # Now we're left with the unprocessed fields that *must* come from # keywords, or default. 
for field in fields_iter: is_related_object = False # Virtual field if field.attname not in kwargs and field.column is None: continue if kwargs: if isinstance(field.remote_field, ForeignObjectRel): try: # Assume object instance was passed in. rel_obj = kwargs.pop(field.name) is_related_object = True except KeyError: try: # Object instance wasn't passed in -- must be an ID. val = kwargs.pop(field.attname) except KeyError: val = field.get_default() else: try: val = kwargs.pop(field.attname) except KeyError: # This is done with an exception rather than the # default argument on pop because we don't want # get_default() to be evaluated, and then not used. # Refs #12057. val = field.get_default() else: val = field.get_default() if is_related_object: # If we are passed a related instance, set it using the # field.name instead of field.attname (e.g. "user" instead of # "user_id") so that the object gets properly cached (and type # checked) by the RelatedObjectDescriptor. if rel_obj is not _DEFERRED: _setattr(self, field.name, rel_obj) else: if val is not _DEFERRED: _setattr(self, field.attname, val) if kwargs: property_names = opts._property_names for prop in tuple(kwargs): try: # Any remaining kwargs must correspond to properties or # virtual fields. if prop in property_names or opts.get_field(prop): if kwargs[prop] is not _DEFERRED: _setattr(self, prop, kwargs[prop]) del kwargs[prop] except (AttributeError, FieldDoesNotExist): pass for kwarg in kwargs: raise TypeError("%s() got an unexpected keyword argument '%s'" % (cls.__name__, kwarg)) super().__init__() post_init.send(sender=cls, instance=self) @classmethod def from_db(cls, db, field_names, values): if len(values) != len(cls._meta.concrete_fields): values_iter = iter(values) values = [ next(values_iter) if f.attname in field_names else DEFERRED for f in cls._meta.concrete_fields ] new = cls(*values) new._state.adding = False new._state.db = db return new def __repr__(self): return '<%s: %s>' % (self.__class__.__name__, self) def __str__(self): return '%s object (%s)' % (self.__class__.__name__, self.pk) def __eq__(self, other): if not isinstance(other, Model): return NotImplemented if self._meta.concrete_model != other._meta.concrete_model: return False my_pk = self.pk if my_pk is None: return self is other return my_pk == other.pk def __hash__(self): if self.pk is None: raise TypeError("Model instances without primary key value are unhashable") return hash(self.pk) def __reduce__(self): data = self.__getstate__() data[DJANGO_VERSION_PICKLE_KEY] = get_version() class_id = self._meta.app_label, self._meta.object_name return model_unpickle, (class_id,), data def __getstate__(self): """Hook to allow choosing the attributes to pickle.""" return self.__dict__ def __setstate__(self, state): msg = None pickled_version = state.get(DJANGO_VERSION_PICKLE_KEY) if pickled_version: current_version = get_version() if current_version != pickled_version: msg = ( "Pickled model instance's Django version %s does not match " "the current version %s." % (pickled_version, current_version) ) else: msg = "Pickled model instance's Django version is not specified." 
if msg: warnings.warn(msg, RuntimeWarning, stacklevel=2) self.__dict__.update(state) def _get_pk_val(self, meta=None): meta = meta or self._meta return getattr(self, meta.pk.attname) def _set_pk_val(self, value): for parent_link in self._meta.parents.values(): if parent_link and parent_link != self._meta.pk: setattr(self, parent_link.target_field.attname, value) return setattr(self, self._meta.pk.attname, value) pk = property(_get_pk_val, _set_pk_val) def get_deferred_fields(self): """ Return a set containing names of deferred fields for this instance. """ return { f.attname for f in self._meta.concrete_fields if f.attname not in self.__dict__ } def refresh_from_db(self, using=None, fields=None): """ Reload field values from the database. By default, the reloading happens from the database this instance was loaded from, or by the read router if this instance wasn't loaded from any database. The using parameter will override the default. Fields can be used to specify which fields to reload. The fields should be an iterable of field attnames. If fields is None, then all non-deferred fields are reloaded. When accessing deferred fields of an instance, the deferred loading of the field will call this method. """ if fields is None: self._prefetched_objects_cache = {} else: prefetched_objects_cache = getattr(self, '_prefetched_objects_cache', ()) for field in fields: if field in prefetched_objects_cache: del prefetched_objects_cache[field] fields.remove(field) if not fields: return if any(LOOKUP_SEP in f for f in fields): raise ValueError( 'Found "%s" in fields argument. Relations and transforms ' 'are not allowed in fields.' % LOOKUP_SEP) hints = {'instance': self} db_instance_qs = self.__class__._base_manager.db_manager(using, hints=hints).filter(pk=self.pk) # Use provided fields, if not set then reload all non-deferred fields. deferred_fields = self.get_deferred_fields() if fields is not None: fields = list(fields) db_instance_qs = db_instance_qs.only(*fields) elif deferred_fields: fields = [f.attname for f in self._meta.concrete_fields if f.attname not in deferred_fields] db_instance_qs = db_instance_qs.only(*fields) db_instance = db_instance_qs.get() non_loaded_fields = db_instance.get_deferred_fields() for field in self._meta.concrete_fields: if field.attname in non_loaded_fields: # This field wasn't refreshed - skip ahead. continue setattr(self, field.attname, getattr(db_instance, field.attname)) # Clear cached foreign keys. if field.is_relation and field.is_cached(self): field.delete_cached_value(self) # Clear cached relations. for field in self._meta.related_objects: if field.is_cached(self): field.delete_cached_value(self) self._state.db = db_instance._state.db def serializable_value(self, field_name): """ Return the value of the field name for this instance. If the field is a foreign key, return the id value instead of the object. If there's no Field object with this name on the model, return the model attribute's value. Used to serialize a field's value (in the serializer, or form output, for example). Normally, you would just access the attribute directly and not use this method. """ try: field = self._meta.get_field(field_name) except FieldDoesNotExist: return getattr(self, field_name) return getattr(self, field.attname) def save(self, force_insert=False, force_update=False, using=None, update_fields=None): """ Save the current instance. Override this in a subclass if you want to control the saving process. 
The 'force_insert' and 'force_update' parameters can be used to insist that the "save" must be an SQL insert or update (or equivalent for non-SQL backends), respectively. Normally, they should not be set. """ # Ensure that a model instance without a PK hasn't been assigned to # a ForeignKey or OneToOneField on this model. If the field is # nullable, allowing the save() would result in silent data loss. for field in self._meta.concrete_fields: # If the related field isn't cached, then an instance hasn't # been assigned and there's no need to worry about this check. if field.is_relation and field.is_cached(self): obj = getattr(self, field.name, None) if not obj: continue # A pk may have been assigned manually to a model instance not # saved to the database (or auto-generated in a case like # UUIDField), but we allow the save to proceed and rely on the # database to raise an IntegrityError if applicable. If # constraints aren't supported by the database, there's the # unavoidable risk of data corruption. if obj.pk is None: # Remove the object from a related instance cache. if not field.remote_field.multiple: field.remote_field.delete_cached_value(obj) raise ValueError( "save() prohibited to prevent data loss due to " "unsaved related object '%s'." % field.name ) elif getattr(self, field.attname) is None: # Use pk from related object if it has been saved after # an assignment. setattr(self, field.attname, obj.pk) # If the relationship's pk/to_field was changed, clear the # cached relationship. if getattr(obj, field.target_field.attname) != getattr(self, field.attname): field.delete_cached_value(self) using = using or router.db_for_write(self.__class__, instance=self) if force_insert and (force_update or update_fields): raise ValueError("Cannot force both insert and updating in model saving.") deferred_fields = self.get_deferred_fields() if update_fields is not None: # If update_fields is empty, skip the save. We do also check for # no-op saves later on for inheritance cases. This bailout is # still needed for skipping signal sending. if not update_fields: return update_fields = frozenset(update_fields) field_names = set() for field in self._meta.fields: if not field.primary_key: field_names.add(field.name) if field.name != field.attname: field_names.add(field.attname) non_model_fields = update_fields.difference(field_names) if non_model_fields: raise ValueError("The following fields do not exist in this " "model or are m2m fields: %s" % ', '.join(non_model_fields)) # If saving to the same database, and this model is deferred, then # automatically do an "update_fields" save on the loaded fields. elif not force_insert and deferred_fields and using == self._state.db: field_names = set() for field in self._meta.concrete_fields: if not field.primary_key and not hasattr(field, 'through'): field_names.add(field.attname) loaded_fields = field_names.difference(deferred_fields) if loaded_fields: update_fields = frozenset(loaded_fields) self.save_base(using=using, force_insert=force_insert, force_update=force_update, update_fields=update_fields) save.alters_data = True def save_base(self, raw=False, force_insert=False, force_update=False, using=None, update_fields=None): """ Handle the parts of saving which should be done only once per save, yet need to be done in raw saves, too. This includes some sanity checks and signal sending. The 'raw' argument is telling save_base not to save any parent models and not to do any changes to the values before save. This is used by fixture loading. 
""" using = using or router.db_for_write(self.__class__, instance=self) assert not (force_insert and (force_update or update_fields)) assert update_fields is None or update_fields cls = origin = self.__class__ # Skip proxies, but keep the origin as the proxy model. if cls._meta.proxy: cls = cls._meta.concrete_model meta = cls._meta if not meta.auto_created: pre_save.send( sender=origin, instance=self, raw=raw, using=using, update_fields=update_fields, ) # A transaction isn't needed if one query is issued. if meta.parents: context_manager = transaction.atomic(using=using, savepoint=False) else: context_manager = transaction.mark_for_rollback_on_error(using=using) with context_manager: parent_inserted = False if not raw: parent_inserted = self._save_parents(cls, using, update_fields) updated = self._save_table( raw, cls, force_insert or parent_inserted, force_update, using, update_fields, ) # Store the database on which the object was saved self._state.db = using # Once saved, this is no longer a to-be-added instance. self._state.adding = False # Signal that the save is complete if not meta.auto_created: post_save.send( sender=origin, instance=self, created=(not updated), update_fields=update_fields, raw=raw, using=using, ) save_base.alters_data = True def _save_parents(self, cls, using, update_fields): """Save all the parents of cls using values from self.""" meta = cls._meta inserted = False for parent, field in meta.parents.items(): # Make sure the link fields are synced between parent and self. if (field and getattr(self, parent._meta.pk.attname) is None and getattr(self, field.attname) is not None): setattr(self, parent._meta.pk.attname, getattr(self, field.attname)) parent_inserted = self._save_parents(cls=parent, using=using, update_fields=update_fields) updated = self._save_table( cls=parent, using=using, update_fields=update_fields, force_insert=parent_inserted, ) if not updated: inserted = True # Set the parent's PK value to self. if field: setattr(self, field.attname, self._get_pk_val(parent._meta)) # Since we didn't have an instance of the parent handy set # attname directly, bypassing the descriptor. Invalidate # the related object cache, in case it's been accidentally # populated. A fresh instance will be re-built from the # database if necessary. if field.is_cached(self): field.delete_cached_value(self) return inserted def _save_table(self, raw=False, cls=None, force_insert=False, force_update=False, using=None, update_fields=None): """ Do the heavy-lifting involved in saving. Update or insert the data for a single table. """ meta = cls._meta non_pks = [f for f in meta.local_concrete_fields if not f.primary_key] if update_fields: non_pks = [f for f in non_pks if f.name in update_fields or f.attname in update_fields] pk_val = self._get_pk_val(meta) if pk_val is None: pk_val = meta.pk.get_pk_value_on_save(self) setattr(self, meta.pk.attname, pk_val) pk_set = pk_val is not None if not pk_set and (force_update or update_fields): raise ValueError("Cannot force an update in save() with no primary key.") updated = False # Skip an UPDATE when adding an instance and primary key has a default. if ( not raw and not force_insert and self._state.adding and meta.pk.default and meta.pk.default is not NOT_PROVIDED ): force_insert = True # If possible, try an UPDATE. If that doesn't update anything, do an INSERT. 
if pk_set and not force_insert: base_qs = cls._base_manager.using(using) values = [(f, None, (getattr(self, f.attname) if raw else f.pre_save(self, False))) for f in non_pks] forced_update = update_fields or force_update updated = self._do_update(base_qs, using, pk_val, values, update_fields, forced_update) if force_update and not updated: raise DatabaseError("Forced update did not affect any rows.") if update_fields and not updated: raise DatabaseError("Save with update_fields did not affect any rows.") if not updated: if meta.order_with_respect_to: # If this is a model with an order_with_respect_to # autopopulate the _order field field = meta.order_with_respect_to filter_args = field.get_filter_kwargs_for_object(self) self._order = cls._base_manager.using(using).filter(**filter_args).aggregate( _order__max=Coalesce( ExpressionWrapper(Max('_order') + Value(1), output_field=IntegerField()), Value(0), ), )['_order__max'] fields = meta.local_concrete_fields if not pk_set: fields = [f for f in fields if f is not meta.auto_field] returning_fields = meta.db_returning_fields results = self._do_insert(cls._base_manager, using, fields, returning_fields, raw) if results: for value, field in zip(results[0], returning_fields): setattr(self, field.attname, value) return updated def _do_update(self, base_qs, using, pk_val, values, update_fields, forced_update): """ Try to update the model. Return True if the model was updated (if an update query was done and a matching row was found in the DB). """ filtered = base_qs.filter(pk=pk_val) if not values: # We can end up here when saving a model in inheritance chain where # update_fields doesn't target any field in current model. In that # case we just say the update succeeded. Another case ending up here # is a model with just PK - in that case check that the PK still # exists. return update_fields is not None or filtered.exists() if self._meta.select_on_save and not forced_update: return ( filtered.exists() and # It may happen that the object is deleted from the DB right after # this check, causing the subsequent UPDATE to return zero matching # rows. The same result can occur in some rare cases when the # database returns zero despite the UPDATE being executed # successfully (a row is matched and updated). In order to # distinguish these two cases, the object's existence in the # database is again checked for if the UPDATE query returns 0. (filtered._update(values) > 0 or filtered.exists()) ) return filtered._update(values) > 0 def _do_insert(self, manager, using, fields, returning_fields, raw): """ Do an INSERT. If returning_fields is defined then this method should return the newly created data for the model. """ return manager._insert( [self], fields=fields, returning_fields=returning_fields, using=using, raw=raw, ) def delete(self, using=None, keep_parents=False): using = using or router.db_for_write(self.__class__, instance=self) assert self.pk is not None, ( "%s object can't be deleted because its %s attribute is set to None." % (self._meta.object_name, self._meta.pk.attname) ) collector = Collector(using=using) collector.collect([self], keep_parents=keep_parents) return collector.delete() delete.alters_data = True def _get_FIELD_display(self, field): value = getattr(self, field.attname) choices_dict = dict(make_hashable(field.flatchoices)) # force_str() to coerce lazy strings. 
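        # For example, with a hypothetical field
        # status = IntegerField(choices=[(1, 'Draft'), (2, 'Published')]),
        # instance.get_status_display() returns 'Draft' for a stored value of
        # 1; values absent from flatchoices fall through unchanged via the
        # dict default below.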
return force_str(choices_dict.get(make_hashable(value), value), strings_only=True) def _get_next_or_previous_by_FIELD(self, field, is_next, **kwargs): if not self.pk: raise ValueError("get_next/get_previous cannot be used on unsaved objects.") op = 'gt' if is_next else 'lt' order = '' if is_next else '-' param = getattr(self, field.attname) q = Q(**{'%s__%s' % (field.name, op): param}) q = q | Q(**{field.name: param, 'pk__%s' % op: self.pk}) qs = self.__class__._default_manager.using(self._state.db).filter(**kwargs).filter(q).order_by( '%s%s' % (order, field.name), '%spk' % order ) try: return qs[0] except IndexError: raise self.DoesNotExist("%s matching query does not exist." % self.__class__._meta.object_name) def _get_next_or_previous_in_order(self, is_next): cachename = "__%s_order_cache" % is_next if not hasattr(self, cachename): op = 'gt' if is_next else 'lt' order = '_order' if is_next else '-_order' order_field = self._meta.order_with_respect_to filter_args = order_field.get_filter_kwargs_for_object(self) obj = self.__class__._default_manager.filter(**filter_args).filter(**{ '_order__%s' % op: self.__class__._default_manager.values('_order').filter(**{ self._meta.pk.name: self.pk }) }).order_by(order)[:1].get() setattr(self, cachename, obj) return getattr(self, cachename) def prepare_database_save(self, field): if self.pk is None: raise ValueError("Unsaved model instance %r cannot be used in an ORM query." % self) return getattr(self, field.remote_field.get_related_field().attname) def clean(self): """ Hook for doing any extra model-wide validation after clean() has been called on every field by self.clean_fields. Any ValidationError raised by this method will not be associated with a particular field; it will have a special-case association with the field defined by NON_FIELD_ERRORS. """ pass def validate_unique(self, exclude=None): """ Check unique constraints on the model and raise ValidationError if any failed. """ unique_checks, date_checks = self._get_unique_checks(exclude=exclude) errors = self._perform_unique_checks(unique_checks) date_errors = self._perform_date_checks(date_checks) for k, v in date_errors.items(): errors.setdefault(k, []).extend(v) if errors: raise ValidationError(errors) def _get_unique_checks(self, exclude=None): """ Return a list of checks to perform. Since validate_unique() could be called from a ModelForm, some fields may have been excluded; we can't perform a unique check on a model that is missing fields involved in that check. Fields that did not validate should also be excluded, but they need to be passed in via the exclude argument. """ if exclude is None: exclude = [] unique_checks = [] unique_togethers = [(self.__class__, self._meta.unique_together)] constraints = [(self.__class__, self._meta.total_unique_constraints)] for parent_class in self._meta.get_parent_list(): if parent_class._meta.unique_together: unique_togethers.append((parent_class, parent_class._meta.unique_together)) if parent_class._meta.total_unique_constraints: constraints.append( (parent_class, parent_class._meta.total_unique_constraints) ) for model_class, unique_together in unique_togethers: for check in unique_together: if not any(name in exclude for name in check): # Add the check if the field isn't excluded. 
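                    # E.g. a hypothetical unique_together = [('site', 'slug')]
                    # contributes (model_class, ('site', 'slug')); if either
                    # name appears in exclude, the whole tuple is skipped.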
                    unique_checks.append((model_class, tuple(check)))

        for model_class, model_constraints in constraints:
            for constraint in model_constraints:
                if not any(name in exclude for name in constraint.fields):
                    unique_checks.append((model_class, constraint.fields))

        # These are checks for the unique_for_<date/year/month>.
        date_checks = []

        # Gather a list of checks for fields declared as unique and add them to
        # the list of checks.
        fields_with_class = [(self.__class__, self._meta.local_fields)]
        for parent_class in self._meta.get_parent_list():
            fields_with_class.append((parent_class, parent_class._meta.local_fields))

        for model_class, fields in fields_with_class:
            for f in fields:
                name = f.name
                if name in exclude:
                    continue
                if f.unique:
                    unique_checks.append((model_class, (name,)))
                if f.unique_for_date and f.unique_for_date not in exclude:
                    date_checks.append((model_class, 'date', name, f.unique_for_date))
                if f.unique_for_year and f.unique_for_year not in exclude:
                    date_checks.append((model_class, 'year', name, f.unique_for_year))
                if f.unique_for_month and f.unique_for_month not in exclude:
                    date_checks.append((model_class, 'month', name, f.unique_for_month))
        return unique_checks, date_checks

    def _perform_unique_checks(self, unique_checks):
        errors = {}

        for model_class, unique_check in unique_checks:
            # Try to look up an existing object with the same values as this
            # object's values for all the unique fields.
            lookup_kwargs = {}
            for field_name in unique_check:
                f = self._meta.get_field(field_name)
                lookup_value = getattr(self, f.attname)
                # TODO: Handle multiple backends with different feature flags.
                if (lookup_value is None or
                        (lookup_value == '' and connection.features.interprets_empty_strings_as_nulls)):
                    # No value, so skip the lookup.
                    continue
                if f.primary_key and not self._state.adding:
                    # No need to check for a unique primary key when editing.
                    continue
                lookup_kwargs[str(field_name)] = lookup_value

            # Some fields were skipped, so there's no reason to do the check.
            if len(unique_check) != len(lookup_kwargs):
                continue

            qs = model_class._default_manager.filter(**lookup_kwargs)

            # Exclude the current object from the query if we are editing an
            # instance (as opposed to creating a new one).
            # Note that we need to use the pk as defined by model_class, not
            # self.pk. These can be different fields because model inheritance
            # allows a single model to have effectively multiple primary keys.
            # Refs #17615.
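            # E.g. under hypothetical multi-table inheritance (a Restaurant
            # model extending Place), the Place-level uniqueness check must
            # exclude the existing row by the pk that Place._meta defines
            # rather than by self.pk, since the two can be distinct fields.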
model_class_pk = self._get_pk_val(model_class._meta) if not self._state.adding and model_class_pk is not None: qs = qs.exclude(pk=model_class_pk) if qs.exists(): if len(unique_check) == 1: key = unique_check[0] else: key = NON_FIELD_ERRORS errors.setdefault(key, []).append(self.unique_error_message(model_class, unique_check)) return errors def _perform_date_checks(self, date_checks): errors = {} for model_class, lookup_type, field, unique_for in date_checks: lookup_kwargs = {} # there's a ticket to add a date lookup, we can remove this special # case if that makes it's way in date = getattr(self, unique_for) if date is None: continue if lookup_type == 'date': lookup_kwargs['%s__day' % unique_for] = date.day lookup_kwargs['%s__month' % unique_for] = date.month lookup_kwargs['%s__year' % unique_for] = date.year else: lookup_kwargs['%s__%s' % (unique_for, lookup_type)] = getattr(date, lookup_type) lookup_kwargs[field] = getattr(self, field) qs = model_class._default_manager.filter(**lookup_kwargs) # Exclude the current object from the query if we are editing an # instance (as opposed to creating a new one) if not self._state.adding and self.pk is not None: qs = qs.exclude(pk=self.pk) if qs.exists(): errors.setdefault(field, []).append( self.date_error_message(lookup_type, field, unique_for) ) return errors def date_error_message(self, lookup_type, field_name, unique_for): opts = self._meta field = opts.get_field(field_name) return ValidationError( message=field.error_messages['unique_for_date'], code='unique_for_date', params={ 'model': self, 'model_name': capfirst(opts.verbose_name), 'lookup_type': lookup_type, 'field': field_name, 'field_label': capfirst(field.verbose_name), 'date_field': unique_for, 'date_field_label': capfirst(opts.get_field(unique_for).verbose_name), } ) def unique_error_message(self, model_class, unique_check): opts = model_class._meta params = { 'model': self, 'model_class': model_class, 'model_name': capfirst(opts.verbose_name), 'unique_check': unique_check, } # A unique field if len(unique_check) == 1: field = opts.get_field(unique_check[0]) params['field_label'] = capfirst(field.verbose_name) return ValidationError( message=field.error_messages['unique'], code='unique', params=params, ) # unique_together else: field_labels = [capfirst(opts.get_field(f).verbose_name) for f in unique_check] params['field_labels'] = get_text_list(field_labels, _('and')) return ValidationError( message=_("%(model_name)s with this %(field_labels)s already exists."), code='unique_together', params=params, ) def full_clean(self, exclude=None, validate_unique=True): """ Call clean_fields(), clean(), and validate_unique() on the model. Raise a ValidationError for any errors that occur. """ errors = {} if exclude is None: exclude = [] else: exclude = list(exclude) try: self.clean_fields(exclude=exclude) except ValidationError as e: errors = e.update_error_dict(errors) # Form.clean() is run even if other validation fails, so do the # same with Model.clean() for consistency. try: self.clean() except ValidationError as e: errors = e.update_error_dict(errors) # Run unique checks, but only for fields that passed validation. 
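        # E.g. if clean_fields() already rejected a hypothetical 'email'
        # field above, 'email' is appended to exclude here so that
        # validate_unique() doesn't report a second, redundant error for a
        # value that is already known to be invalid.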
if validate_unique: for name in errors: if name != NON_FIELD_ERRORS and name not in exclude: exclude.append(name) try: self.validate_unique(exclude=exclude) except ValidationError as e: errors = e.update_error_dict(errors) if errors: raise ValidationError(errors) def clean_fields(self, exclude=None): """ Clean all fields and raise a ValidationError containing a dict of all validation errors if any occur. """ if exclude is None: exclude = [] errors = {} for f in self._meta.fields: if f.name in exclude: continue # Skip validation for empty fields with blank=True. The developer # is responsible for making sure they have a valid value. raw_value = getattr(self, f.attname) if f.blank and raw_value in f.empty_values: continue try: setattr(self, f.attname, f.clean(raw_value, self)) except ValidationError as e: errors[f.name] = e.error_list if errors: raise ValidationError(errors) @classmethod def check(cls, **kwargs): errors = [*cls._check_swappable(), *cls._check_model(), *cls._check_managers(**kwargs)] if not cls._meta.swapped: databases = kwargs.get('databases') or [] errors += [ *cls._check_fields(**kwargs), *cls._check_m2m_through_same_relationship(), *cls._check_long_column_names(databases), ] clash_errors = ( *cls._check_id_field(), *cls._check_field_name_clashes(), *cls._check_model_name_db_lookup_clashes(), *cls._check_property_name_related_field_accessor_clashes(), *cls._check_single_primary_key(), ) errors.extend(clash_errors) # If there are field name clashes, hide consequent column name # clashes. if not clash_errors: errors.extend(cls._check_column_name_clashes()) errors += [ *cls._check_index_together(), *cls._check_unique_together(), *cls._check_indexes(databases), *cls._check_ordering(), *cls._check_constraints(databases), ] return errors @classmethod def _check_swappable(cls): """Check if the swapped model exists.""" errors = [] if cls._meta.swapped: try: apps.get_model(cls._meta.swapped) except ValueError: errors.append( checks.Error( "'%s' is not of the form 'app_label.app_name'." % cls._meta.swappable, id='models.E001', ) ) except LookupError: app_label, model_name = cls._meta.swapped.split('.') errors.append( checks.Error( "'%s' references '%s.%s', which has not been " "installed, or is abstract." % ( cls._meta.swappable, app_label, model_name ), id='models.E002', ) ) return errors @classmethod def _check_model(cls): errors = [] if cls._meta.proxy: if cls._meta.local_fields or cls._meta.local_many_to_many: errors.append( checks.Error( "Proxy model '%s' contains model fields." % cls.__name__, id='models.E017', ) ) return errors @classmethod def _check_managers(cls, **kwargs): """Perform all manager checks.""" errors = [] for manager in cls._meta.managers: errors.extend(manager.check(**kwargs)) return errors @classmethod def _check_fields(cls, **kwargs): """Perform all field checks.""" errors = [] for field in cls._meta.local_fields: errors.extend(field.check(**kwargs)) for field in cls._meta.local_many_to_many: errors.extend(field.check(from_model=cls, **kwargs)) return errors @classmethod def _check_m2m_through_same_relationship(cls): """ Check if no relationship model is used by more than one m2m field. """ errors = [] seen_intermediary_signatures = [] fields = cls._meta.local_many_to_many # Skip when the target model wasn't found. fields = (f for f in fields if isinstance(f.remote_field.model, ModelBase)) # Skip when the relationship model wasn't found. 
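        # (A through model that didn't resolve is still a lazy string
        # reference at this point -- e.g. a hypothetical
        # through='missing_app.Missing' -- so the isinstance() test below
        # filters it out.)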
fields = (f for f in fields if isinstance(f.remote_field.through, ModelBase)) for f in fields: signature = (f.remote_field.model, cls, f.remote_field.through, f.remote_field.through_fields) if signature in seen_intermediary_signatures: errors.append( checks.Error( "The model has two identical many-to-many relations " "through the intermediate model '%s'." % f.remote_field.through._meta.label, obj=cls, id='models.E003', ) ) else: seen_intermediary_signatures.append(signature) return errors @classmethod def _check_id_field(cls): """Check if `id` field is a primary key.""" fields = [f for f in cls._meta.local_fields if f.name == 'id' and f != cls._meta.pk] # fields is empty or consists of the invalid "id" field if fields and not fields[0].primary_key and cls._meta.pk.name == 'id': return [ checks.Error( "'id' can only be used as a field name if the field also " "sets 'primary_key=True'.", obj=cls, id='models.E004', ) ] else: return [] @classmethod def _check_field_name_clashes(cls): """Forbid field shadowing in multi-table inheritance.""" errors = [] used_fields = {} # name or attname -> field # Check that multi-inheritance doesn't cause field name shadowing. for parent in cls._meta.get_parent_list(): for f in parent._meta.local_fields: clash = used_fields.get(f.name) or used_fields.get(f.attname) or None if clash: errors.append( checks.Error( "The field '%s' from parent model " "'%s' clashes with the field '%s' " "from parent model '%s'." % ( clash.name, clash.model._meta, f.name, f.model._meta ), obj=cls, id='models.E005', ) ) used_fields[f.name] = f used_fields[f.attname] = f # Check that fields defined in the model don't clash with fields from # parents, including auto-generated fields like multi-table inheritance # child accessors. for parent in cls._meta.get_parent_list(): for f in parent._meta.get_fields(): if f not in used_fields: used_fields[f.name] = f for f in cls._meta.local_fields: clash = used_fields.get(f.name) or used_fields.get(f.attname) or None # Note that we may detect clash between user-defined non-unique # field "id" and automatically added unique field "id", both # defined at the same model. This special case is considered in # _check_id_field and here we ignore it. id_conflict = f.name == "id" and clash and clash.name == "id" and clash.model == cls if clash and not id_conflict: errors.append( checks.Error( "The field '%s' clashes with the field '%s' " "from model '%s'." % ( f.name, clash.name, clash.model._meta ), obj=f, id='models.E006', ) ) used_fields[f.name] = f used_fields[f.attname] = f return errors @classmethod def _check_column_name_clashes(cls): # Store a list of column names which have already been used by other fields. used_column_names = [] errors = [] for f in cls._meta.local_fields: _, column_name = f.get_attname_column() # Ensure the column name is not already in use. if column_name and column_name in used_column_names: errors.append( checks.Error( "Field '%s' has column name '%s' that is used by " "another field." % (f.name, column_name), hint="Specify a 'db_column' for the field.", obj=cls, id='models.E007' ) ) else: used_column_names.append(column_name) return errors @classmethod def _check_model_name_db_lookup_clashes(cls): errors = [] model_name = cls.__name__ if model_name.startswith('_') or model_name.endswith('_'): errors.append( checks.Error( "The model name '%s' cannot start or end with an underscore " "as it collides with the query lookup syntax." 
% model_name, obj=cls, id='models.E023' ) ) elif LOOKUP_SEP in model_name: errors.append( checks.Error( "The model name '%s' cannot contain double underscores as " "it collides with the query lookup syntax." % model_name, obj=cls, id='models.E024' ) ) return errors @classmethod def _check_property_name_related_field_accessor_clashes(cls): errors = [] property_names = cls._meta._property_names related_field_accessors = ( f.get_attname() for f in cls._meta._get_fields(reverse=False) if f.is_relation and f.related_model is not None ) for accessor in related_field_accessors: if accessor in property_names: errors.append( checks.Error( "The property '%s' clashes with a related field " "accessor." % accessor, obj=cls, id='models.E025', ) ) return errors @classmethod def _check_single_primary_key(cls): errors = [] if sum(1 for f in cls._meta.local_fields if f.primary_key) > 1: errors.append( checks.Error( "The model cannot have more than one field with " "'primary_key=True'.", obj=cls, id='models.E026', ) ) return errors @classmethod def _check_index_together(cls): """Check the value of "index_together" option.""" if not isinstance(cls._meta.index_together, (tuple, list)): return [ checks.Error( "'index_together' must be a list or tuple.", obj=cls, id='models.E008', ) ] elif any(not isinstance(fields, (tuple, list)) for fields in cls._meta.index_together): return [ checks.Error( "All 'index_together' elements must be lists or tuples.", obj=cls, id='models.E009', ) ] else: errors = [] for fields in cls._meta.index_together: errors.extend(cls._check_local_fields(fields, "index_together")) return errors @classmethod def _check_unique_together(cls): """Check the value of "unique_together" option.""" if not isinstance(cls._meta.unique_together, (tuple, list)): return [ checks.Error( "'unique_together' must be a list or tuple.", obj=cls, id='models.E010', ) ] elif any(not isinstance(fields, (tuple, list)) for fields in cls._meta.unique_together): return [ checks.Error( "All 'unique_together' elements must be lists or tuples.", obj=cls, id='models.E011', ) ] else: errors = [] for fields in cls._meta.unique_together: errors.extend(cls._check_local_fields(fields, "unique_together")) return errors @classmethod def _check_indexes(cls, databases): """Check fields, names, and conditions of indexes.""" errors = [] for index in cls._meta.indexes: # Index name can't start with an underscore or a number, restricted # for cross-database compatibility with Oracle. if index.name[0] == '_' or index.name[0].isdigit(): errors.append( checks.Error( "The index name '%s' cannot start with an underscore " "or a number." % index.name, obj=cls, id='models.E033', ), ) if len(index.name) > index.max_name_length: errors.append( checks.Error( "The index name '%s' cannot be longer than %d " "characters." % (index.name, index.max_name_length), obj=cls, id='models.E034', ), ) for db in databases: if not router.allow_migrate_model(db, cls): continue connection = connections[db] if ( connection.features.supports_partial_indexes or 'supports_partial_indexes' in cls._meta.required_db_features ): continue if any(index.condition is not None for index in cls._meta.indexes): errors.append( checks.Warning( '%s does not support indexes with conditions.' % connection.display_name, hint=( "Conditions will be ignored. Silence this warning " "if you don't care about it." 
), obj=cls, id='models.W037', ) ) fields = [field for index in cls._meta.indexes for field, _ in index.fields_orders] errors.extend(cls._check_local_fields(fields, 'indexes')) return errors @classmethod def _check_local_fields(cls, fields, option): from django.db import models # In order to avoid hitting the relation tree prematurely, we use our # own fields_map instead of using get_field() forward_fields_map = {} for field in cls._meta._get_fields(reverse=False): forward_fields_map[field.name] = field if hasattr(field, 'attname'): forward_fields_map[field.attname] = field errors = [] for field_name in fields: try: field = forward_fields_map[field_name] except KeyError: errors.append( checks.Error( "'%s' refers to the nonexistent field '%s'." % ( option, field_name, ), obj=cls, id='models.E012', ) ) else: if isinstance(field.remote_field, models.ManyToManyRel): errors.append( checks.Error( "'%s' refers to a ManyToManyField '%s', but " "ManyToManyFields are not permitted in '%s'." % ( option, field_name, option, ), obj=cls, id='models.E013', ) ) elif field not in cls._meta.local_fields: errors.append( checks.Error( "'%s' refers to field '%s' which is not local to model '%s'." % (option, field_name, cls._meta.object_name), hint="This issue may be caused by multi-table inheritance.", obj=cls, id='models.E016', ) ) return errors @classmethod def _check_ordering(cls): """ Check "ordering" option -- is it a list of strings and do all fields exist? """ if cls._meta._ordering_clash: return [ checks.Error( "'ordering' and 'order_with_respect_to' cannot be used together.", obj=cls, id='models.E021', ), ] if cls._meta.order_with_respect_to or not cls._meta.ordering: return [] if not isinstance(cls._meta.ordering, (list, tuple)): return [ checks.Error( "'ordering' must be a tuple or list (even if you want to order by only one field).", obj=cls, id='models.E014', ) ] errors = [] fields = cls._meta.ordering # Skip expressions and '?' fields. fields = (f for f in fields if isinstance(f, str) and f != '?') # Convert "-field" to "field". fields = ((f[1:] if f.startswith('-') else f) for f in fields) # Separate related fields and non-related fields. _fields = [] related_fields = [] for f in fields: if LOOKUP_SEP in f: related_fields.append(f) else: _fields.append(f) fields = _fields # Check related fields. for field in related_fields: _cls = cls fld = None for part in field.split(LOOKUP_SEP): try: # pk is an alias that won't be found by opts.get_field. if part == 'pk': fld = _cls._meta.pk else: fld = _cls._meta.get_field(part) if fld.is_relation: _cls = fld.get_path_info()[-1].to_opts.model else: _cls = None except (FieldDoesNotExist, AttributeError): if fld is None or ( fld.get_transform(part) is None and fld.get_lookup(part) is None ): errors.append( checks.Error( "'ordering' refers to the nonexistent field, " "related field, or lookup '%s'." % field, obj=cls, id='models.E015', ) ) # Skip ordering on pk. This is always a valid order_by field # but is an alias and therefore won't be found by opts.get_field. fields = {f for f in fields if f != 'pk'} # Check for invalid or nonexistent fields in ordering. invalid_fields = [] # Any field name that is not present in field_names does not exist. # Also, ordering by m2m fields is not allowed. 
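        # E.g. for a hypothetical Meta with
        # ordering = ['-created', 'author__name', '?'], only 'created'
        # survives to this point: '?' was skipped, the related lookup was
        # validated in the loop above, and 'created' must now appear among
        # the valid names collected below or models.E015 is reported.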
opts = cls._meta valid_fields = set(chain.from_iterable( (f.name, f.attname) if not (f.auto_created and not f.concrete) else (f.field.related_query_name(),) for f in chain(opts.fields, opts.related_objects) )) invalid_fields.extend(fields - valid_fields) for invalid_field in invalid_fields: errors.append( checks.Error( "'ordering' refers to the nonexistent field, related " "field, or lookup '%s'." % invalid_field, obj=cls, id='models.E015', ) ) return errors @classmethod def _check_long_column_names(cls, databases): """ Check that any auto-generated column names are shorter than the limits for each database in which the model will be created. """ if not databases: return [] errors = [] allowed_len = None db_alias = None # Find the minimum max allowed length among all specified db_aliases. for db in databases: # skip databases where the model won't be created if not router.allow_migrate_model(db, cls): continue connection = connections[db] max_name_length = connection.ops.max_name_length() if max_name_length is None or connection.features.truncates_names: continue else: if allowed_len is None: allowed_len = max_name_length db_alias = db elif max_name_length < allowed_len: allowed_len = max_name_length db_alias = db if allowed_len is None: return errors for f in cls._meta.local_fields: _, column_name = f.get_attname_column() # Check if auto-generated name for the field is too long # for the database. if f.db_column is None and column_name is not None and len(column_name) > allowed_len: errors.append( checks.Error( 'Autogenerated column name too long for field "%s". ' 'Maximum length is "%s" for database "%s".' % (column_name, allowed_len, db_alias), hint="Set the column name manually using 'db_column'.", obj=cls, id='models.E018', ) ) for f in cls._meta.local_many_to_many: # Skip nonexistent models. if isinstance(f.remote_field.through, str): continue # Check if auto-generated name for the M2M field is too long # for the database. for m2m in f.remote_field.through._meta.local_fields: _, rel_name = m2m.get_attname_column() if m2m.db_column is None and rel_name is not None and len(rel_name) > allowed_len: errors.append( checks.Error( 'Autogenerated column name too long for M2M field ' '"%s". Maximum length is "%s" for database "%s".' % (rel_name, allowed_len, db_alias), hint=( "Use 'through' to create a separate model for " "M2M and then set column_name using 'db_column'." ), obj=cls, id='models.E019', ) ) return errors @classmethod def _check_constraints(cls, databases): errors = [] for db in databases: if not router.allow_migrate_model(db, cls): continue connection = connections[db] if not ( connection.features.supports_table_check_constraints or 'supports_table_check_constraints' in cls._meta.required_db_features ) and any( isinstance(constraint, CheckConstraint) for constraint in cls._meta.constraints ): errors.append( checks.Warning( '%s does not support check constraints.' % connection.display_name, hint=( "A constraint won't be created. Silence this " "warning if you don't care about it." ), obj=cls, id='models.W027', ) ) if not ( connection.features.supports_partial_indexes or 'supports_partial_indexes' in cls._meta.required_db_features ) and any( isinstance(constraint, UniqueConstraint) and constraint.condition is not None for constraint in cls._meta.constraints ): errors.append( checks.Warning( '%s does not support unique constraints with ' 'conditions.' % connection.display_name, hint=( "A constraint won't be created. Silence this " "warning if you don't care about it." 
), obj=cls, id='models.W036', ) ) if not ( connection.features.supports_deferrable_unique_constraints or 'supports_deferrable_unique_constraints' in cls._meta.required_db_features ) and any( isinstance(constraint, UniqueConstraint) and constraint.deferrable is not None for constraint in cls._meta.constraints ): errors.append( checks.Warning( '%s does not support deferrable unique constraints.' % connection.display_name, hint=( "A constraint won't be created. Silence this " "warning if you don't care about it." ), obj=cls, id='models.W038', ) ) return errors ############################################ # HELPER FUNCTIONS (CURRIED MODEL METHODS) # ############################################ # ORDERING METHODS ######################### def method_set_order(self, ordered_obj, id_list, using=None): if using is None: using = DEFAULT_DB_ALIAS order_wrt = ordered_obj._meta.order_with_respect_to filter_args = order_wrt.get_forward_related_filter(self) ordered_obj.objects.db_manager(using).filter(**filter_args).bulk_update([ ordered_obj(pk=pk, _order=order) for order, pk in enumerate(id_list) ], ['_order']) def method_get_order(self, ordered_obj): order_wrt = ordered_obj._meta.order_with_respect_to filter_args = order_wrt.get_forward_related_filter(self) pk_name = ordered_obj._meta.pk.name return ordered_obj.objects.filter(**filter_args).values_list(pk_name, flat=True) def make_foreign_order_accessors(model, related_model): setattr( related_model, 'get_%s_order' % model.__name__.lower(), partialmethod(method_get_order, model) ) setattr( related_model, 'set_%s_order' % model.__name__.lower(), partialmethod(method_set_order, model) ) ######## # MISC # ######## def model_unpickle(model_id): """Used to unpickle Model subclasses with deferred fields.""" if isinstance(model_id, tuple): model = apps.get_model(*model_id) else: # Backwards compat - the model was cached directly in earlier versions. model = model_id return model.__new__(model) model_unpickle.__safe_for_unpickle__ = True
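# Usage sketch for the accessors installed by make_foreign_order_accessors()
# (hypothetical Question/Answer models, where Answer declares
# order_with_respect_to = 'question'):
#
#     question.get_answer_order()           # -> Answer pks in current _order
#     question.set_answer_order([3, 1, 2])  # rewrites _order via bulk_update()
#
# set_FOO_order() assumes id_list contains each related pk exactly once; the
# enumerate() index itself becomes the stored _order value.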
bc10692da0718ca59a459660a82f2e1c731342b09954acf74ed9370e53be3b6e
import copy import inspect from importlib import import_module from django.db import router from django.db.models.query import QuerySet class BaseManager: # To retain order, track each time a Manager instance is created. creation_counter = 0 # Set to True for the 'objects' managers that are automatically created. auto_created = False #: If set to True the manager will be serialized into migrations and will #: thus be available in e.g. RunPython operations. use_in_migrations = False def __new__(cls, *args, **kwargs): # Capture the arguments to make returning them trivial. obj = super().__new__(cls) obj._constructor_args = (args, kwargs) return obj def __init__(self): super().__init__() self._set_creation_counter() self.model = None self.name = None self._db = None self._hints = {} def __str__(self): """Return "app_label.model_label.manager_name".""" return '%s.%s' % (self.model._meta.label, self.name) def __class_getitem__(cls, *args, **kwargs): return cls def deconstruct(self): """ Return a 5-tuple of the form (as_manager (True), manager_class, queryset_class, args, kwargs). Raise a ValueError if the manager is dynamically generated. """ qs_class = self._queryset_class if getattr(self, '_built_with_as_manager', False): # using MyQuerySet.as_manager() return ( True, # as_manager None, # manager_class '%s.%s' % (qs_class.__module__, qs_class.__name__), # qs_class None, # args None, # kwargs ) else: module_name = self.__module__ name = self.__class__.__name__ # Make sure it's actually there and not an inner class module = import_module(module_name) if not hasattr(module, name): raise ValueError( "Could not find manager %s in %s.\n" "Please note that you need to inherit from managers you " "dynamically generated with 'from_queryset()'." % (name, module_name) ) return ( False, # as_manager '%s.%s' % (module_name, name), # manager_class None, # qs_class self._constructor_args[0], # args self._constructor_args[1], # kwargs ) def check(self, **kwargs): return [] @classmethod def _get_queryset_methods(cls, queryset_class): def create_method(name, method): def manager_method(self, *args, **kwargs): return getattr(self.get_queryset(), name)(*args, **kwargs) manager_method.__name__ = method.__name__ manager_method.__doc__ = method.__doc__ return manager_method new_methods = {} for name, method in inspect.getmembers(queryset_class, predicate=inspect.isfunction): # Only copy missing methods. if hasattr(cls, name): continue # Only copy public methods or methods with the attribute `queryset_only=False`. queryset_only = getattr(method, 'queryset_only', None) if queryset_only or (queryset_only is None and name.startswith('_')): continue # Copy the method onto the manager. new_methods[name] = create_method(name, method) return new_methods @classmethod def from_queryset(cls, queryset_class, class_name=None): if class_name is None: class_name = '%sFrom%s' % (cls.__name__, queryset_class.__name__) return type(class_name, (cls,), { '_queryset_class': queryset_class, **cls._get_queryset_methods(queryset_class), }) def contribute_to_class(self, model, name): self.name = self.name or name self.model = model setattr(model, name, ManagerDescriptor(self)) model._meta.add_manager(self) def _set_creation_counter(self): """ Set the creation counter value for this instance and increment the class-level copy. 
""" self.creation_counter = BaseManager.creation_counter BaseManager.creation_counter += 1 def db_manager(self, using=None, hints=None): obj = copy.copy(self) obj._db = using or self._db obj._hints = hints or self._hints return obj @property def db(self): return self._db or router.db_for_read(self.model, **self._hints) ####################### # PROXIES TO QUERYSET # ####################### def get_queryset(self): """ Return a new QuerySet object. Subclasses can override this method to customize the behavior of the Manager. """ return self._queryset_class(model=self.model, using=self._db, hints=self._hints) def all(self): # We can't proxy this method through the `QuerySet` like we do for the # rest of the `QuerySet` methods. This is because `QuerySet.all()` # works by creating a "copy" of the current queryset and in making said # copy, all the cached `prefetch_related` lookups are lost. See the # implementation of `RelatedManager.get_queryset()` for a better # understanding of how this comes into play. return self.get_queryset() def __eq__(self, other): return ( isinstance(other, self.__class__) and self._constructor_args == other._constructor_args ) def __hash__(self): return id(self) class Manager(BaseManager.from_queryset(QuerySet)): pass class ManagerDescriptor: def __init__(self, manager): self.manager = manager def __get__(self, instance, cls=None): if instance is not None: raise AttributeError("Manager isn't accessible via %s instances" % cls.__name__) if cls._meta.abstract: raise AttributeError("Manager isn't available; %s is abstract" % ( cls._meta.object_name, )) if cls._meta.swapped: raise AttributeError( "Manager isn't available; '%s.%s' has been swapped for '%s'" % ( cls._meta.app_label, cls._meta.object_name, cls._meta.swapped, ) ) return cls._meta.managers_map[self.manager.name] class EmptyManager(Manager): def __init__(self, model): super().__init__() self.model = model def get_queryset(self): return super().get_queryset().none()